--- a/contrib/automation/hgautomation/cli.py Fri Feb 18 12:55:39 2022 +0100
+++ b/contrib/automation/hgautomation/cli.py Fri Feb 18 14:27:43 2022 +0100
@@ -158,7 +158,7 @@
windows.synchronize_hg(SOURCE_ROOT, revision, instance)
- for py_version in ("2.7", "3.7", "3.8", "3.9"):
+ for py_version in ("2.7", "3.7", "3.8", "3.9", "3.10"):
for arch in ("x86", "x64"):
windows.purge_hg(winrm_client)
windows.build_wheel(
@@ -377,7 +377,7 @@
sp.add_argument(
'--python-version',
help='Python version to build for',
- choices={'2.7', '3.7', '3.8', '3.9'},
+ choices={'2.7', '3.7', '3.8', '3.9', '3.10'},
nargs='*',
default=['3.8'],
)
@@ -501,7 +501,7 @@
sp.add_argument(
'--python-version',
help='Python version to use',
- choices={'2.7', '3.5', '3.6', '3.7', '3.8', '3.9'},
+ choices={'2.7', '3.5', '3.6', '3.7', '3.8', '3.9', '3.10'},
default='2.7',
)
sp.add_argument(
--- a/contrib/automation/hgautomation/windows.py Fri Feb 18 12:55:39 2022 +0100
+++ b/contrib/automation/hgautomation/windows.py Fri Feb 18 14:27:43 2022 +0100
@@ -129,6 +129,8 @@
WHEEL_FILENAME_PYTHON38_X64 = 'mercurial-{version}-cp38-cp38-win_amd64.whl'
WHEEL_FILENAME_PYTHON39_X86 = 'mercurial-{version}-cp39-cp39-win32.whl'
WHEEL_FILENAME_PYTHON39_X64 = 'mercurial-{version}-cp39-cp39-win_amd64.whl'
+WHEEL_FILENAME_PYTHON310_X86 = 'mercurial-{version}-cp310-cp310-win32.whl'
+WHEEL_FILENAME_PYTHON310_X64 = 'mercurial-{version}-cp310-cp310-win_amd64.whl'
EXE_FILENAME_PYTHON2_X86 = 'Mercurial-{version}-x86-python2.exe'
EXE_FILENAME_PYTHON2_X64 = 'Mercurial-{version}-x64-python2.exe'
@@ -480,6 +482,8 @@
dist_path / WHEEL_FILENAME_PYTHON38_X64.format(version=version),
dist_path / WHEEL_FILENAME_PYTHON39_X86.format(version=version),
dist_path / WHEEL_FILENAME_PYTHON39_X64.format(version=version),
+ dist_path / WHEEL_FILENAME_PYTHON310_X86.format(version=version),
+ dist_path / WHEEL_FILENAME_PYTHON310_X64.format(version=version),
)
@@ -493,6 +497,8 @@
dist_path / WHEEL_FILENAME_PYTHON38_X64.format(version=version),
dist_path / WHEEL_FILENAME_PYTHON39_X86.format(version=version),
dist_path / WHEEL_FILENAME_PYTHON39_X64.format(version=version),
+ dist_path / WHEEL_FILENAME_PYTHON310_X86.format(version=version),
+ dist_path / WHEEL_FILENAME_PYTHON310_X64.format(version=version),
dist_path / EXE_FILENAME_PYTHON2_X86.format(version=version),
dist_path / EXE_FILENAME_PYTHON2_X64.format(version=version),
dist_path / EXE_FILENAME_PYTHON3_X86.format(version=version),
--- a/contrib/heptapod-ci.yml Fri Feb 18 12:55:39 2022 +0100
+++ b/contrib/heptapod-ci.yml Fri Feb 18 14:27:43 2022 +0100
@@ -56,6 +56,11 @@
phabricator-refresh:
stage: phabricator
+ rules:
+ - if: '"$PHABRICATOR_TOKEN" != "NO-PHAB"'
+ when: on_success
+ - if: '"$PHABRICATOR_TOKEN" == "NO-PHAB"'
+ when: never
variables:
DEFAULT_COMMENT: ":white_check_mark: refresh by Heptapod after a successful CI run (:octopus: :green_heart:)"
STABLE_COMMENT: ":white_check_mark: refresh by Heptapod after a successful CI run (:octopus: :green_heart:)\n⚠ This patch is intended for stable ⚠\n{image https://media.giphy.com/media/nYI8SmmChYXK0/source.gif}"
--- a/contrib/install-windows-dependencies.ps1 Fri Feb 18 12:55:39 2022 +0100
+++ b/contrib/install-windows-dependencies.ps1 Fri Feb 18 14:27:43 2022 +0100
@@ -29,10 +29,15 @@
$PYTHON38_x64_URL = "https://www.python.org/ftp/python/3.8.10/python-3.8.10-amd64.exe"
$PYTHON38_x64_SHA256 = "7628244cb53408b50639d2c1287c659f4e29d3dfdb9084b11aed5870c0c6a48a"
-$PYTHON39_x86_URL = "https://www.python.org/ftp/python/3.9.5/python-3.9.5.exe"
-$PYTHON39_x86_SHA256 = "505129081a839b699a6ab9064b441ad922ef03767b5dd4241fd0c2166baf64de"
-$PYTHON39_x64_URL = "https://www.python.org/ftp/python/3.9.5/python-3.9.5-amd64.exe"
-$PYTHON39_x64_SHA256 = "84d5243088ba00c11e51905c704dbe041040dfff044f4e1ce5476844ee2e6eac"
+$PYTHON39_x86_URL = "https://www.python.org/ftp/python/3.9.9/python-3.9.9.exe"
+$PYTHON39_x86_SHA256 = "6646a5683adf14d35e8c53aab946895bc0f0b825f7acac3a62cc85ee7d0dc71a"
+$PYTHON39_x64_URL = "https://www.python.org/ftp/python/3.9.9/python-3.9.9-amd64.exe"
+$PYTHON39_x64_SHA256 = "137d59e5c0b01a8f1bdcba08344402ae658c81c6bf03b6602bd8b4e951ad0714"
+
+$PYTHON310_x86_URL = "https://www.python.org/ftp/python/3.10.0/python-3.10.0.exe"
+$PYTHON310_x86_SHA256 = "ea896eeefb1db9e12fb89ec77a6e28c9fe52b4a162a34c85d9688be2ec2392e8"
+$PYTHON310_x64_URL = "https://www.python.org/ftp/python/3.10.0/python-3.10.0-amd64.exe"
+$PYTHON310_x64_SHA256 = "cb580eb7dc55f9198e650f016645023e8b2224cf7d033857d12880b46c5c94ef"
# PIP 19.2.3.
$PIP_URL = "https://github.com/pypa/get-pip/raw/309a56c5fd94bd1134053a541cb4657a4e47e09d/get-pip.py"
@@ -132,6 +137,8 @@
Secure-Download $PYTHON38_x64_URL ${prefix}\assets\python38-x64.exe $PYTHON38_x64_SHA256
Secure-Download $PYTHON39_x86_URL ${prefix}\assets\python39-x86.exe $PYTHON39_x86_SHA256
Secure-Download $PYTHON39_x64_URL ${prefix}\assets\python39-x64.exe $PYTHON39_x64_SHA256
+ Secure-Download $PYTHON310_x86_URL ${prefix}\assets\python310-x86.exe $PYTHON310_x86_SHA256
+ Secure-Download $PYTHON310_x64_URL ${prefix}\assets\python310-x64.exe $PYTHON310_x64_SHA256
Secure-Download $PIP_URL ${pip} $PIP_SHA256
Secure-Download $VS_BUILD_TOOLS_URL ${prefix}\assets\vs_buildtools.exe $VS_BUILD_TOOLS_SHA256
Secure-Download $INNO_SETUP_URL ${prefix}\assets\InnoSetup.exe $INNO_SETUP_SHA256
@@ -146,6 +153,8 @@
# Install-Python3 "Python 3.8 64-bit" ${prefix}\assets\python38-x64.exe ${prefix}\python38-x64 ${pip}
Install-Python3 "Python 3.9 32-bit" ${prefix}\assets\python39-x86.exe ${prefix}\python39-x86 ${pip}
Install-Python3 "Python 3.9 64-bit" ${prefix}\assets\python39-x64.exe ${prefix}\python39-x64 ${pip}
+ Install-Python3 "Python 3.10 32-bit" ${prefix}\assets\python310-x86.exe ${prefix}\python310-x86 ${pip}
+ Install-Python3 "Python 3.10 64-bit" ${prefix}\assets\python310-x64.exe ${prefix}\python310-x64 ${pip}
Write-Output "installing Visual Studio 2017 Build Tools and SDKs"
Invoke-Process ${prefix}\assets\vs_buildtools.exe "--quiet --wait --norestart --nocache --channelUri https://aka.ms/vs/15/release/channel --add Microsoft.VisualStudio.Workload.MSBuildTools --add Microsoft.VisualStudio.Component.Windows10SDK.17763 --add Microsoft.VisualStudio.Workload.VCTools --add Microsoft.VisualStudio.Component.Windows10SDK --add Microsoft.VisualStudio.Component.VC.140"
--- a/contrib/packaging/requirements-windows-py3.txt Fri Feb 18 12:55:39 2022 +0100
+++ b/contrib/packaging/requirements-windows-py3.txt Fri Feb 18 14:27:43 2022 +0100
@@ -1,68 +1,84 @@
#
-# This file is autogenerated by pip-compile
+# This file is autogenerated by pip-compile with python 3.7
# To update, run:
#
# pip-compile --generate-hashes --output-file=contrib/packaging/requirements-windows-py3.txt contrib/packaging/requirements-windows.txt.in
#
atomicwrites==1.4.0 \
--hash=sha256:6d1784dea7c0c8d4a5172b6c620f40b6e4cbfdf96d783691f2e1302a7b88e197 \
- --hash=sha256:ae70396ad1a434f9c7046fd2dd196fc04b12f9e91ffb859164193be8b6168a7a \
+ --hash=sha256:ae70396ad1a434f9c7046fd2dd196fc04b12f9e91ffb859164193be8b6168a7a
# via pytest
attrs==21.2.0 \
--hash=sha256:149e90d6d8ac20db7a955ad60cf0e6881a3f20d37096140088356da6c716b0b1 \
- --hash=sha256:ef6aaac3ca6cd92904cdd0d83f629a15f18053ec84e6432106f7a4d04ae4f5fb \
+ --hash=sha256:ef6aaac3ca6cd92904cdd0d83f629a15f18053ec84e6432106f7a4d04ae4f5fb
# via pytest
cached-property==1.5.2 \
--hash=sha256:9fa5755838eecbb2d234c3aa390bd80fbd3ac6b6869109bfc1b499f7bd89a130 \
- --hash=sha256:df4f613cf7ad9a588cc381aaf4a512d26265ecebd5eb9e1ba12f1319eb85a6a0 \
+ --hash=sha256:df4f613cf7ad9a588cc381aaf4a512d26265ecebd5eb9e1ba12f1319eb85a6a0
# via pygit2
certifi==2021.5.30 \
--hash=sha256:2bbf76fd432960138b3ef6dda3dde0544f27cbf8546c458e60baf371917ba9ee \
- --hash=sha256:50b1e4f8446b06f41be7dd6338db18e0990601dce795c2b1686458aa7e8fa7d8 \
+ --hash=sha256:50b1e4f8446b06f41be7dd6338db18e0990601dce795c2b1686458aa7e8fa7d8
# via dulwich
-cffi==1.14.4 \
- --hash=sha256:00a1ba5e2e95684448de9b89888ccd02c98d512064b4cb987d48f4b40aa0421e \
- --hash=sha256:00e28066507bfc3fe865a31f325c8391a1ac2916219340f87dfad602c3e48e5d \
- --hash=sha256:045d792900a75e8b1e1b0ab6787dd733a8190ffcf80e8c8ceb2fb10a29ff238a \
- --hash=sha256:0638c3ae1a0edfb77c6765d487fee624d2b1ee1bdfeffc1f0b58c64d149e7eec \
- --hash=sha256:105abaf8a6075dc96c1fe5ae7aae073f4696f2905fde6aeada4c9d2926752362 \
- --hash=sha256:155136b51fd733fa94e1c2ea5211dcd4c8879869008fc811648f16541bf99668 \
- --hash=sha256:1a465cbe98a7fd391d47dce4b8f7e5b921e6cd805ef421d04f5f66ba8f06086c \
- --hash=sha256:1d2c4994f515e5b485fd6d3a73d05526aa0fcf248eb135996b088d25dfa1865b \
- --hash=sha256:2c24d61263f511551f740d1a065eb0212db1dbbbbd241db758f5244281590c06 \
- --hash=sha256:51a8b381b16ddd370178a65360ebe15fbc1c71cf6f584613a7ea08bfad946698 \
- --hash=sha256:594234691ac0e9b770aee9fcdb8fa02c22e43e5c619456efd0d6c2bf276f3eb2 \
- --hash=sha256:5cf4be6c304ad0b6602f5c4e90e2f59b47653ac1ed9c662ed379fe48a8f26b0c \
- --hash=sha256:64081b3f8f6f3c3de6191ec89d7dc6c86a8a43911f7ecb422c60e90c70be41c7 \
- --hash=sha256:6bc25fc545a6b3d57b5f8618e59fc13d3a3a68431e8ca5fd4c13241cd70d0009 \
- --hash=sha256:798caa2a2384b1cbe8a2a139d80734c9db54f9cc155c99d7cc92441a23871c03 \
- --hash=sha256:7c6b1dece89874d9541fc974917b631406233ea0440d0bdfbb8e03bf39a49b3b \
- --hash=sha256:840793c68105fe031f34d6a086eaea153a0cd5c491cde82a74b420edd0a2b909 \
- --hash=sha256:8d6603078baf4e11edc4168a514c5ce5b3ba6e3e9c374298cb88437957960a53 \
- --hash=sha256:9cc46bc107224ff5b6d04369e7c595acb700c3613ad7bcf2e2012f62ece80c35 \
- --hash=sha256:9f7a31251289b2ab6d4012f6e83e58bc3b96bd151f5b5262467f4bb6b34a7c26 \
- --hash=sha256:9ffb888f19d54a4d4dfd4b3f29bc2c16aa4972f1c2ab9c4ab09b8ab8685b9c2b \
- --hash=sha256:a7711edca4dcef1a75257b50a2fbfe92a65187c47dab5a0f1b9b332c5919a3fb \
- --hash=sha256:af5c59122a011049aad5dd87424b8e65a80e4a6477419c0c1015f73fb5ea0293 \
- --hash=sha256:b18e0a9ef57d2b41f5c68beefa32317d286c3d6ac0484efd10d6e07491bb95dd \
- --hash=sha256:b4e248d1087abf9f4c10f3c398896c87ce82a9856494a7155823eb45a892395d \
- --hash=sha256:ba4e9e0ae13fc41c6b23299545e5ef73055213e466bd107953e4a013a5ddd7e3 \
- --hash=sha256:c6332685306b6417a91b1ff9fae889b3ba65c2292d64bd9245c093b1b284809d \
- --hash=sha256:d9efd8b7a3ef378dd61a1e77367f1924375befc2eba06168b6ebfa903a5e59ca \
- --hash=sha256:df5169c4396adc04f9b0a05f13c074df878b6052430e03f50e68adf3a57aa28d \
- --hash=sha256:ebb253464a5d0482b191274f1c8bf00e33f7e0b9c66405fbffc61ed2c839c775 \
- --hash=sha256:ec80dc47f54e6e9a78181ce05feb71a0353854cc26999db963695f950b5fb375 \
- --hash=sha256:f032b34669220030f905152045dfa27741ce1a6db3324a5bc0b96b6c7420c87b \
- --hash=sha256:f60567825f791c6f8a592f3c6e3bd93dd2934e3f9dac189308426bd76b00ef3b \
- --hash=sha256:f803eaa94c2fcda012c047e62bc7a51b0bdabda1cad7a92a522694ea2d76e49f \
+cffi==1.15.0 \
+ --hash=sha256:00c878c90cb53ccfaae6b8bc18ad05d2036553e6d9d1d9dbcf323bbe83854ca3 \
+ --hash=sha256:0104fb5ae2391d46a4cb082abdd5c69ea4eab79d8d44eaaf79f1b1fd806ee4c2 \
+ --hash=sha256:06c48159c1abed75c2e721b1715c379fa3200c7784271b3c46df01383b593636 \
+ --hash=sha256:0808014eb713677ec1292301ea4c81ad277b6cdf2fdd90fd540af98c0b101d20 \
+ --hash=sha256:10dffb601ccfb65262a27233ac273d552ddc4d8ae1bf93b21c94b8511bffe728 \
+ --hash=sha256:14cd121ea63ecdae71efa69c15c5543a4b5fbcd0bbe2aad864baca0063cecf27 \
+ --hash=sha256:17771976e82e9f94976180f76468546834d22a7cc404b17c22df2a2c81db0c66 \
+ --hash=sha256:181dee03b1170ff1969489acf1c26533710231c58f95534e3edac87fff06c443 \
+ --hash=sha256:23cfe892bd5dd8941608f93348c0737e369e51c100d03718f108bf1add7bd6d0 \
+ --hash=sha256:263cc3d821c4ab2213cbe8cd8b355a7f72a8324577dc865ef98487c1aeee2bc7 \
+ --hash=sha256:2756c88cbb94231c7a147402476be2c4df2f6078099a6f4a480d239a8817ae39 \
+ --hash=sha256:27c219baf94952ae9d50ec19651a687b826792055353d07648a5695413e0c605 \
+ --hash=sha256:2a23af14f408d53d5e6cd4e3d9a24ff9e05906ad574822a10563efcef137979a \
+ --hash=sha256:31fb708d9d7c3f49a60f04cf5b119aeefe5644daba1cd2a0fe389b674fd1de37 \
+ --hash=sha256:3415c89f9204ee60cd09b235810be700e993e343a408693e80ce7f6a40108029 \
+ --hash=sha256:3773c4d81e6e818df2efbc7dd77325ca0dcb688116050fb2b3011218eda36139 \
+ --hash=sha256:3b96a311ac60a3f6be21d2572e46ce67f09abcf4d09344c49274eb9e0bf345fc \
+ --hash=sha256:3f7d084648d77af029acb79a0ff49a0ad7e9d09057a9bf46596dac9514dc07df \
+ --hash=sha256:41d45de54cd277a7878919867c0f08b0cf817605e4eb94093e7516505d3c8d14 \
+ --hash=sha256:4238e6dab5d6a8ba812de994bbb0a79bddbdf80994e4ce802b6f6f3142fcc880 \
+ --hash=sha256:45db3a33139e9c8f7c09234b5784a5e33d31fd6907800b316decad50af323ff2 \
+ --hash=sha256:45e8636704eacc432a206ac7345a5d3d2c62d95a507ec70d62f23cd91770482a \
+ --hash=sha256:4958391dbd6249d7ad855b9ca88fae690783a6be9e86df65865058ed81fc860e \
+ --hash=sha256:4a306fa632e8f0928956a41fa8e1d6243c71e7eb59ffbd165fc0b41e316b2474 \
+ --hash=sha256:57e9ac9ccc3101fac9d6014fba037473e4358ef4e89f8e181f8951a2c0162024 \
+ --hash=sha256:59888172256cac5629e60e72e86598027aca6bf01fa2465bdb676d37636573e8 \
+ --hash=sha256:5e069f72d497312b24fcc02073d70cb989045d1c91cbd53979366077959933e0 \
+ --hash=sha256:64d4ec9f448dfe041705426000cc13e34e6e5bb13736e9fd62e34a0b0c41566e \
+ --hash=sha256:6dc2737a3674b3e344847c8686cf29e500584ccad76204efea14f451d4cc669a \
+ --hash=sha256:74fdfdbfdc48d3f47148976f49fab3251e550a8720bebc99bf1483f5bfb5db3e \
+ --hash=sha256:75e4024375654472cc27e91cbe9eaa08567f7fbdf822638be2814ce059f58032 \
+ --hash=sha256:786902fb9ba7433aae840e0ed609f45c7bcd4e225ebb9c753aa39725bb3e6ad6 \
+ --hash=sha256:8b6c2ea03845c9f501ed1313e78de148cd3f6cad741a75d43a29b43da27f2e1e \
+ --hash=sha256:91d77d2a782be4274da750752bb1650a97bfd8f291022b379bb8e01c66b4e96b \
+ --hash=sha256:91ec59c33514b7c7559a6acda53bbfe1b283949c34fe7440bcf917f96ac0723e \
+ --hash=sha256:920f0d66a896c2d99f0adbb391f990a84091179542c205fa53ce5787aff87954 \
+ --hash=sha256:a5263e363c27b653a90078143adb3d076c1a748ec9ecc78ea2fb916f9b861962 \
+ --hash=sha256:abb9a20a72ac4e0fdb50dae135ba5e77880518e742077ced47eb1499e29a443c \
+ --hash=sha256:c2051981a968d7de9dd2d7b87bcb9c939c74a34626a6e2f8181455dd49ed69e4 \
+ --hash=sha256:c21c9e3896c23007803a875460fb786118f0cdd4434359577ea25eb556e34c55 \
+ --hash=sha256:c2502a1a03b6312837279c8c1bd3ebedf6c12c4228ddbad40912d671ccc8a962 \
+ --hash=sha256:d4d692a89c5cf08a8557fdeb329b82e7bf609aadfaed6c0d79f5a449a3c7c023 \
+ --hash=sha256:da5db4e883f1ce37f55c667e5c0de439df76ac4cb55964655906306918e7363c \
+ --hash=sha256:e7022a66d9b55e93e1a845d8c9eba2a1bebd4966cd8bfc25d9cd07d515b33fa6 \
+ --hash=sha256:ef1f279350da2c586a69d32fc8733092fd32cc8ac95139a00377841f59a3f8d8 \
+ --hash=sha256:f54a64f8b0c8ff0b64d18aa76675262e1700f3995182267998c31ae974fbc382 \
+ --hash=sha256:f5c7150ad32ba43a07c4479f40241756145a1f03b43480e058cfd862bf5041c7 \
+ --hash=sha256:f6f824dc3bce0edab5f427efcfb1d63ee75b6fcb7282900ccaf925be84efb0fc \
+ --hash=sha256:fd8a250edc26254fe5b33be00402e6d287f562b6a5b2152dec302fa15bb3e997 \
+ --hash=sha256:ffaa5c925128e29efbde7301d8ecaf35c8c60ffbcd6a1ffd3a552177c8e5e796
# via pygit2
colorama==0.4.4 \
--hash=sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b \
- --hash=sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2 \
+ --hash=sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2
# via pytest
docutils==0.16 \
--hash=sha256:0c5b78adfbf7762415433f5515cd5c9e762339e23369dbe8000d84a4bf4ab3af \
- --hash=sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc \
+ --hash=sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc
# via -r contrib/packaging/requirements-windows.txt.in
dulwich==0.20.6 ; python_version >= "3" \
--hash=sha256:1ccd55e38fa9f169290f93e027ab4508202f5bdd6ef534facac4edd3f6903f0d \
@@ -77,26 +93,29 @@
--hash=sha256:8f7a7f973be2beedfb10dd8d3eb6bdf9ec466c72ad555704897cbd6357fe5021 \
--hash=sha256:bea6e6caffc6c73bfd1647714c5715ab96ac49deb8beb8b67511529afa25685a \
--hash=sha256:e5871b86a079e9e290f52ab14559cea1b694a0b8ed2b9ebb898f6ced7f14a406 \
- --hash=sha256:e593f514b8ac740b4ceeb047745b4719bfc9f334904245c6edcb3a9d002f577b \
+ --hash=sha256:e593f514b8ac740b4ceeb047745b4719bfc9f334904245c6edcb3a9d002f577b
# via -r contrib/packaging/requirements-windows.txt.in
fuzzywuzzy==0.18.0 \
- --hash=sha256:45016e92264780e58972dca1b3d939ac864b78437422beecebb3095f8efd00e8 \
+ --hash=sha256:45016e92264780e58972dca1b3d939ac864b78437422beecebb3095f8efd00e8
# via -r contrib/packaging/requirements-windows.txt.in
idna==3.2 \
--hash=sha256:14475042e284991034cb48e06f6851428fb14c4dc953acd9be9a5e95c7b6dd7a \
- --hash=sha256:467fbad99067910785144ce333826c71fb0e63a425657295239737f7ecd125f3 \
+ --hash=sha256:467fbad99067910785144ce333826c71fb0e63a425657295239737f7ecd125f3
# via yarl
importlib-metadata==3.1.0 \
--hash=sha256:590690d61efdd716ff82c39ca9a9d4209252adfe288a4b5721181050acbd4175 \
- --hash=sha256:d9b8a46a0885337627a6430db287176970fff18ad421becec1d64cfc763c2099 \
- # via keyring, pluggy, pytest
+ --hash=sha256:d9b8a46a0885337627a6430db287176970fff18ad421becec1d64cfc763c2099
+ # via
+ # keyring
+ # pluggy
+ # pytest
iniconfig==1.1.1 \
--hash=sha256:011e24c64b7f47f6ebd835bb12a743f2fbe9a26d4cecaa7f53bc4f35ee9da8b3 \
- --hash=sha256:bc3af051d7d14b2ee5ef9969666def0cd1a000e121eaea580d4a313df4b37f32 \
+ --hash=sha256:bc3af051d7d14b2ee5ef9969666def0cd1a000e121eaea580d4a313df4b37f32
# via pytest
keyring==21.4.0 \
--hash=sha256:4e34ea2fdec90c1c43d6610b5a5fafa1b9097db1802948e90caf5763974b8f8d \
- --hash=sha256:9aeadd006a852b78f4b4ef7c7556c2774d2432bbef8ee538a3e9089ac8b11466 \
+ --hash=sha256:9aeadd006a852b78f4b4ef7c7556c2774d2432bbef8ee538a3e9089ac8b11466
# via -r contrib/packaging/requirements-windows.txt.in
multidict==5.1.0 \
--hash=sha256:018132dbd8688c7a69ad89c4a3f39ea2f9f33302ebe567a879da8f4ca73f0d0a \
@@ -135,62 +154,68 @@
--hash=sha256:ecc771ab628ea281517e24fd2c52e8f31c41e66652d07599ad8818abaad38cda \
--hash=sha256:f200755768dc19c6f4e2b672421e0ebb3dd54c38d5a4f262b872d8cfcc9e93b5 \
--hash=sha256:f21756997ad8ef815d8ef3d34edd98804ab5ea337feedcd62fb52d22bf531281 \
- --hash=sha256:fc13a9524bc18b6fb6e0dbec3533ba0496bbed167c56d0aabefd965584557d80 \
+ --hash=sha256:fc13a9524bc18b6fb6e0dbec3533ba0496bbed167c56d0aabefd965584557d80
# via yarl
packaging==21.0 \
--hash=sha256:7dc96269f53a4ccec5c0670940a4281106dd0bb343f47b7471f779df49c2fbe7 \
- --hash=sha256:c86254f9220d55e31cc94d69bade760f0847da8000def4dfe1c6b872fd14ff14 \
+ --hash=sha256:c86254f9220d55e31cc94d69bade760f0847da8000def4dfe1c6b872fd14ff14
# via pytest
pluggy==0.13.1 \
--hash=sha256:15b2acde666561e1298d71b523007ed7364de07029219b604cf808bfa1c765b0 \
- --hash=sha256:966c145cd83c96502c3c3868f50408687b38434af77734af1e9ca461a4081d2d \
+ --hash=sha256:966c145cd83c96502c3c3868f50408687b38434af77734af1e9ca461a4081d2d
# via pytest
py==1.10.0 \
--hash=sha256:21b81bda15b66ef5e1a777a21c4dcd9c20ad3efd0b3f817e7a809035269e1bd3 \
- --hash=sha256:3b80836aa6d1feeaa108e046da6423ab8f6ceda6468545ae8d02d9d58d18818a \
+ --hash=sha256:3b80836aa6d1feeaa108e046da6423ab8f6ceda6468545ae8d02d9d58d18818a
# via pytest
-pycparser==2.20 \
- --hash=sha256:2d475327684562c3a96cc71adf7dc8c4f0565175cf86b6d7a404ff4c771f15f0 \
- --hash=sha256:7582ad22678f0fcd81102833f60ef8d0e57288b6b5fb00323d101be910e35705 \
+pycparser==2.21 \
+ --hash=sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9 \
+ --hash=sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206
# via cffi
-pygit2==1.4.0 ; python_version >= "3" \
- --hash=sha256:0d298098e286eeda000e49ca7e1b41f87300e10dd8b9d06b32b008bd61f50b83 \
- --hash=sha256:0ee135eb2cd8b07ce1374f3596cc5c3213472d6389bad6a4c5d87d8e267e93e9 \
- --hash=sha256:32eb863d6651d4890ced318505ea8dc229bd9637deaf29c898de1ab574d727a0 \
- --hash=sha256:37d6d7d6d7804c42a0fe23425c72e38093488525092fc5e51a05684e63503ce7 \
- --hash=sha256:41204b6f3406d9f53147710f3cc485d77181ba67f57c34d36b7c86de1c14a18c \
- --hash=sha256:818c91b582109d90580c5da74af783738838353f15eb12eeb734d80a974b05a3 \
- --hash=sha256:8306a302487dac67df7af6a064bb37e8a8eb4138958f9560ff49ff162e185dab \
- --hash=sha256:9c2f2d9ef59513007b66f6534b000792b614de3faf60313a0a68f6b8571aea85 \
- --hash=sha256:9c8d5881eb709e2e2e13000b507a131bd5fb91a879581030088d0ddffbcd19af \
- --hash=sha256:b422e417739def0a136a6355723dfe8a5ffc83db5098076f28a14f1d139779c1 \
- --hash=sha256:cbeb38ab1df9b5d8896548a11e63aae8a064763ab5f1eabe4475e6b8a78ee1c8 \
- --hash=sha256:cf00481ddf053e549a6edd0216bdc267b292d261eae02a67bb3737de920cbf88 \
- --hash=sha256:d0d889144e9487d926fecea947c3f39ce5f477e521d7d467d2e66907e4cd657d \
- --hash=sha256:ddb7a1f6d38063e8724abfa1cfdfb0f9b25014b8bca0546274b7a84b873a3888 \
- --hash=sha256:e9037a7d810750fe23c9f5641ef14a0af2525ff03e14752cd4f73e1870ecfcb0 \
- --hash=sha256:ec5c0365a9bdfcac1609d20868507b28685ec5ea7cc3a2c903c9b62ef2e0bbc0 \
- --hash=sha256:fdd8ba30cda277290e000322f505132f590cf89bd7d31829b45a3cb57447ec32 \
+pygit2==1.7.1 ; python_version >= "3" \
+ --hash=sha256:2c9e95efb86c0b32cc07c26be3d179e851ca4a7899c47fef63c4203963144f5e \
+ --hash=sha256:3ddacbf461652d3d4900382f821d9fbd5ae2dedecd7862b5245842419ad0ccba \
+ --hash=sha256:4cb0414df6089d0072ebe93ff2f34730737172dd5f0e72289567d06a6caf09c0 \
+ --hash=sha256:56e960dc74f4582bfa3ca17a1a9d542732fc93b5cf8f82574c235d06b2d61eae \
+ --hash=sha256:6b17ab922c2a2d99b30ab9222472b07732bf7261d9f9655a4ea23b4c700049d8 \
+ --hash=sha256:73a7b471f22cb59e8729016de1f447c472b3b2c1cc2b622194e5e3b48a7f5776 \
+ --hash=sha256:761a8850e33822796c1c24d411d5cc2460c04e1a74b04ae8560efd3596bbd6bd \
+ --hash=sha256:7c467e81158f5827b3bca6362e5cc9b92857eff9de65034d338c1f18524b09be \
+ --hash=sha256:7c56e10592e62610a19bd3e2a633aafe3488c57b906c7c2fde0299937f0f0b2f \
+ --hash=sha256:7cc2a8e29cc9598310a78cf58b70d9331277cf374802be8f97d97c4a9e5d8387 \
+ --hash=sha256:812670f7994f31778e873a9eced29d2bbfa91674e8be0ab1e974c8a4bda9cbab \
+ --hash=sha256:8cdb0b1d6c3d24b44f340fed143b16e64ba23fe2a449f1a5db87aaf9339a9dbe \
+ --hash=sha256:91b77a305d8d18b649396e66e832d654cd593a3d29b5728f753f254a04533812 \
+ --hash=sha256:a75bcde32238c77eb0cf7d9698a5aa899408d7ad999a5920a29a7c4b80fdeaa7 \
+ --hash=sha256:b060240cf3038e7a0706bbfc5436dd03b8d5ac797ac1d512b613f4d04b974c80 \
+ --hash=sha256:cdfa61c0428a8182e5a6a1161c017b824cd511574f080a40b10d6413774eb0ca \
+ --hash=sha256:d7faa29558436decc2e78110f38d6677eb366b683ba5cdc2803d47195711165d \
+ --hash=sha256:d831825ad9c3b3c28e6b3ef8a2401ad2d3fd4db5455427ff27175a7e254e2592 \
+ --hash=sha256:df4c477bdfac85d32a1e3180282cd829a0980aa69be9bd0f7cbd4db1778ca72b \
+ --hash=sha256:eced3529bafcaaac015d08dfaa743b3cbad37fcd5b13ae9d280b8b7f716ec5ce \
+ --hash=sha256:fec17e2da668e6bb192d777417aad9c7ca924a166d0a0b9a81a11e00362b1bc7
# via -r contrib/packaging/requirements-windows.txt.in
pygments==2.7.1 \
--hash=sha256:307543fe65c0947b126e83dd5a61bd8acbd84abec11f43caebaf5534cbc17998 \
- --hash=sha256:926c3f319eda178d1bd90851e4317e6d8cdb5e292a3386aac9bd75eca29cf9c7 \
+ --hash=sha256:926c3f319eda178d1bd90851e4317e6d8cdb5e292a3386aac9bd75eca29cf9c7
# via -r contrib/packaging/requirements-windows.txt.in
pyparsing==2.4.7 \
--hash=sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1 \
- --hash=sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b \
+ --hash=sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b
# via packaging
-pytest-vcr==1.0.2 \
- --hash=sha256:23ee51b75abbcc43d926272773aae4f39f93aceb75ed56852d0bf618f92e1896 \
- # via -r contrib/packaging/requirements-windows.txt.in
pytest==6.2.4 \
--hash=sha256:50bcad0a0b9c5a72c8e4e7c9855a3ad496ca6a881a3641b4260605450772c54b \
- --hash=sha256:91ef2131a9bd6be8f76f1f08eac5c5317221d6ad1e143ae03894b862e8976890 \
+ --hash=sha256:91ef2131a9bd6be8f76f1f08eac5c5317221d6ad1e143ae03894b862e8976890
# via pytest-vcr
+pytest-vcr==1.0.2 \
+ --hash=sha256:23ee51b75abbcc43d926272773aae4f39f93aceb75ed56852d0bf618f92e1896
+ # via -r contrib/packaging/requirements-windows.txt.in
pywin32-ctypes==0.2.0 \
--hash=sha256:24ffc3b341d457d48e8922352130cf2644024a4ff09762a2261fd34c36ee5942 \
- --hash=sha256:9dc2d991b3479cc2df15930958b674a48a227d5361d413827a4cfd0b5876fc98 \
- # via -r contrib/packaging/requirements-windows.txt.in, keyring
+ --hash=sha256:9dc2d991b3479cc2df15930958b674a48a227d5361d413827a4cfd0b5876fc98
+ # via
+ # -r contrib/packaging/requirements-windows.txt.in
+ # keyring
pyyaml==5.4.1 \
--hash=sha256:08682f6b72c722394747bddaf0aa62277e02557c0fd1c42cb853016a38f8dedf \
--hash=sha256:0f5f5786c0e09baddcd8b4b45f20a7b5d61a7e7e99846e3c799b05c7c53fa696 \
@@ -220,41 +245,43 @@
--hash=sha256:fa5ae20527d8e831e8230cbffd9f8fe952815b2b7dae6ffec25318803a7528fc \
--hash=sha256:fd7f6999a8070df521b6384004ef42833b9bd62cfee11a09bda1079b4b704247 \
--hash=sha256:fdc842473cd33f45ff6bce46aea678a54e3d21f1b61a7750ce3c498eedfe25d6 \
- --hash=sha256:fe69978f3f768926cfa37b867e3843918e012cf83f680806599ddce33c2c68b0 \
+ --hash=sha256:fe69978f3f768926cfa37b867e3843918e012cf83f680806599ddce33c2c68b0
# via vcrpy
six==1.16.0 \
--hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \
- --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254 \
+ --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254
# via vcrpy
toml==0.10.2 \
--hash=sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b \
- --hash=sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f \
+ --hash=sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f
# via pytest
typing-extensions==3.10.0.0 \
--hash=sha256:0ac0f89795dd19de6b97debb0c6af1c70987fd80a2d62d1958f7e56fcc31b497 \
--hash=sha256:50b6f157849174217d0656f99dc82fe932884fb250826c18350e159ec6cdf342 \
- --hash=sha256:779383f6086d90c99ae41cf0ff39aac8a7937a9283ce0a414e5dd782f4c94a84 \
+ --hash=sha256:779383f6086d90c99ae41cf0ff39aac8a7937a9283ce0a414e5dd782f4c94a84
# via yarl
urllib3==1.25.11 \
--hash=sha256:8d7eaa5a82a1cac232164990f04874c594c9453ec55eef02eab885aa02fc17a2 \
- --hash=sha256:f5321fbe4bf3fefa0efd0bfe7fb14e90909eb62a48ccda331726b4319897dd5e \
+ --hash=sha256:f5321fbe4bf3fefa0efd0bfe7fb14e90909eb62a48ccda331726b4319897dd5e
# via dulwich
vcrpy==4.1.1 \
--hash=sha256:12c3fcdae7b88ecf11fc0d3e6d77586549d4575a2ceee18e82eee75c1f626162 \
- --hash=sha256:57095bf22fc0a2d99ee9674cdafebed0f3ba763018582450706f7d3a74fff599 \
+ --hash=sha256:57095bf22fc0a2d99ee9674cdafebed0f3ba763018582450706f7d3a74fff599
# via pytest-vcr
-windows-curses==2.2.0 \
- --hash=sha256:1452d771ec6f9b3fef037da2b169196a9a12be4e86a6c27dd579adac70c42028 \
- --hash=sha256:267544e4f60c09af6505e50a69d7f01d7f8a281cf4bd4fc7efc3b32b9a4ef64e \
- --hash=sha256:389228a3df556102e72450f599283094168aa82eee189f501ad9f131a0fc92e1 \
- --hash=sha256:84336fe470fa07288daec5c684dec74c0766fec6b3511ccedb4c494804acfbb7 \
- --hash=sha256:9aa6ff60be76f5de696dc6dbf7897e3b1e6abcf4c0f741e9a0ee22cd6ef382f8 \
- --hash=sha256:c4a8ce00e82635f06648cc40d99f470be4e3ffeb84f9f7ae9d6a4f68ec6361e7 \
- --hash=sha256:c5cd032bc7d0f03224ab55c925059d98e81795098d59bbd10f7d05c7ea9677ce \
- --hash=sha256:fc0be372fe6da3c39d7093154ce029115a927bf287f34b4c615e2b3f8c23dfaa \
+windows-curses==2.3.0 \
+ --hash=sha256:170c0d941c2e0cdf864e7f0441c1bdf0709232bf4aa7ce7f54d90fc76a4c0504 \
+ --hash=sha256:4d5fb991d1b90a41c2332f02241a1f84c8a1e6bc8f6e0d26f532d0da7a9f7b51 \
+ --hash=sha256:7a35eda4cb120b9e1a5ae795f3bc06c55b92c9d391baba6be1903285a05f3551 \
+ --hash=sha256:935be95cfdb9213f6f5d3d5bcd489960e3a8fbc9b574e7b2e8a3a3cc46efff49 \
+ --hash=sha256:a3a63a0597729e10f923724c2cf972a23ea677b400d2387dee1d668cf7116177 \
+ --hash=sha256:c860f596d28377e47f322b7382be4d3573fd76d1292234996bb7f72e0bc0ed0d \
+ --hash=sha256:cc5fa913780d60f4a40824d374a4f8ca45b4e205546e83a2d85147315a57457e \
+ --hash=sha256:d5cde8ec6d582aa77af791eca54f60858339fb3f391945f9cad11b1ab71062e3 \
+ --hash=sha256:e913dc121446d92b33fe4f5bcca26d3a34e4ad19f2af160370d57c3d1e93b4e1 \
+ --hash=sha256:fbc2131cec57e422c6660e6cdb3420aff5be5169b8e45bb7c471f884b0590a2b
# via -r contrib/packaging/requirements-windows.txt.in
wrapt==1.12.1 \
- --hash=sha256:b62ffa81fb85f4332a4f609cab4ac40709470da05643a082ec1eb88e6d9b97d7 \
+ --hash=sha256:b62ffa81fb85f4332a4f609cab4ac40709470da05643a082ec1eb88e6d9b97d7
# via vcrpy
yarl==1.6.3 \
--hash=sha256:00d7ad91b6583602eb9c1d085a2cf281ada267e9a197e8b7cae487dadbfa293e \
@@ -293,9 +320,9 @@
--hash=sha256:e6b5460dc5ad42ad2b36cca524491dfcaffbfd9c8df50508bddc354e787b8dc2 \
--hash=sha256:f040bcc6725c821a4c0665f3aa96a4d0805a7aaf2caf266d256b8ed71b9f041c \
--hash=sha256:f0b059678fd549c66b89bed03efcabb009075bd131c248ecdf087bdb6faba24a \
- --hash=sha256:fcbb48a93e8699eae920f8d92f7160c03567b421bc17362a9ffbbd706a816f71 \
+ --hash=sha256:fcbb48a93e8699eae920f8d92f7160c03567b421bc17362a9ffbbd706a816f71
# via vcrpy
zipp==3.4.0 \
--hash=sha256:102c24ef8f171fd729d46599845e95c7ab894a4cf45f5de11a44cc7444fb1108 \
- --hash=sha256:ed5eee1974372595f9e416cc7bbeeb12335201d8081ca8a0743c954d4446e5cb \
+ --hash=sha256:ed5eee1974372595f9e416cc7bbeeb12335201d8081ca8a0743c954d4446e5cb
# via importlib-metadata
--- a/contrib/packaging/requirements.txt Fri Feb 18 12:55:39 2022 +0100
+++ b/contrib/packaging/requirements.txt Fri Feb 18 14:27:43 2022 +0100
@@ -1,16 +1,16 @@
#
-# This file is autogenerated by pip-compile
+# This file is autogenerated by pip-compile with python 3.7
# To update, run:
#
# pip-compile --generate-hashes --output-file=contrib/packaging/requirements.txt contrib/packaging/requirements.txt.in
#
docutils==0.16 \
--hash=sha256:0c5b78adfbf7762415433f5515cd5c9e762339e23369dbe8000d84a4bf4ab3af \
- --hash=sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc \
+ --hash=sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc
# via -r contrib/packaging/requirements.txt.in
jinja2==2.11.2 \
--hash=sha256:89aab215427ef59c34ad58735269eb58b1a5808103067f7bb9d5836c651b3bb0 \
- --hash=sha256:f0a4641d3cf955324a89c04f3d94663aa4d638abe8f733ecd3582848e1c37035 \
+ --hash=sha256:f0a4641d3cf955324a89c04f3d94663aa4d638abe8f733ecd3582848e1c37035
# via -r contrib/packaging/requirements.txt.in
markupsafe==1.1.1 \
--hash=sha256:00bc623926325b26bb9605ae9eae8a215691f33cae5df11ca5424f06f2d1f473 \
@@ -45,5 +45,5 @@
--hash=sha256:cd5df75523866410809ca100dc9681e301e3c27567cf498077e8551b6d20e42f \
--hash=sha256:cdb132fc825c38e1aeec2c8aa9338310d29d337bebbd7baa06889d09a60a1fa2 \
--hash=sha256:e249096428b3ae81b08327a63a485ad0878de3fb939049038579ac0ef61e17e7 \
- --hash=sha256:e8313f01ba26fbbe36c7be1966a7b7424942f670f38e666995b88d012765b9be \
+ --hash=sha256:e8313f01ba26fbbe36c7be1966a7b7424942f670f38e666995b88d012765b9be
# via jinja2
--- a/contrib/phab-refresh-stack.sh Fri Feb 18 12:55:39 2022 +0100
+++ b/contrib/phab-refresh-stack.sh Fri Feb 18 14:27:43 2022 +0100
@@ -1,6 +1,11 @@
#!/bin/bash
set -eu
+if [[ "$PHABRICATOR_TOKEN" == "NO-PHAB" ]]; then
+ echo 'Skipping Phabricator Step' >&2
+ exit 0
+fi
+
revision_in_stack=`hg log \
--rev '.#stack and ::. and topic()' \
-T '\nONE-REV\n' \
@@ -27,6 +32,7 @@
if [[ "$PHABRICATOR_TOKEN" == "" ]]; then
echo 'missing $PHABRICATOR_TOKEN variable' >&2
+ echo '(use PHABRICATOR_TOKEN="NO-PHAB" to disable this step)' >&2
exit 2
fi
--- a/contrib/simplemerge Fri Feb 18 12:55:39 2022 +0100
+++ b/contrib/simplemerge Fri Feb 18 14:27:43 2022 +0100
@@ -13,9 +13,9 @@
context,
error,
fancyopts,
- pycompat,
simplemerge,
ui as uimod,
+ util,
)
from mercurial.utils import procutil, stringutil
@@ -65,6 +65,17 @@
procutil.stdout.write(b' %-*s %s\n' % (opts_len, first, second))
+def _verifytext(input, ui, quiet=False, allow_binary=False):
+ """verifies that text is non-binary (unless opts[text] is passed,
+ then we just warn)"""
+ if stringutil.binary(input.text()):
+ msg = _(b"%s looks like a binary file.") % input.fctx.path()
+ if not quiet:
+ ui.warn(_(b'warning: %s\n') % msg)
+ if not allow_binary:
+ sys.exit(1)
+
+
try:
for fp in (sys.stdin, procutil.stdout, sys.stderr):
procutil.setbinary(fp)
@@ -80,16 +91,44 @@
sys.exit(0)
if len(args) != 3:
raise ParseError(_(b'wrong number of arguments').decode('utf8'))
+ mode = b'merge'
+ if len(opts[b'label']) > 2:
+ mode = b'merge3'
local, base, other = args
- sys.exit(
- simplemerge.simplemerge(
- uimod.ui.load(),
- context.arbitraryfilectx(local),
- context.arbitraryfilectx(base),
- context.arbitraryfilectx(other),
- **pycompat.strkwargs(opts)
- )
+ overrides = opts[b'label']
+ if len(overrides) > 3:
+        raise error.InputError(b'can only specify three labels')
+ labels = [local, other, base]
+ labels[: len(overrides)] = overrides
+ local_input = simplemerge.MergeInput(
+ context.arbitraryfilectx(local), labels[0]
+ )
+ other_input = simplemerge.MergeInput(
+ context.arbitraryfilectx(other), labels[1]
+ )
+ base_input = simplemerge.MergeInput(
+ context.arbitraryfilectx(base), labels[2]
)
+
+ quiet = opts.get(b'quiet')
+ allow_binary = opts.get(b'text')
+ ui = uimod.ui.load()
+ _verifytext(local_input, ui, quiet=quiet, allow_binary=allow_binary)
+ _verifytext(base_input, ui, quiet=quiet, allow_binary=allow_binary)
+ _verifytext(other_input, ui, quiet=quiet, allow_binary=allow_binary)
+
+ merged_text, conflicts = simplemerge.simplemerge(
+ local_input,
+ base_input,
+ other_input,
+ mode,
+ allow_binary=allow_binary,
+ )
+ if opts.get(b'print'):
+ ui.fout.write(merged_text)
+ else:
+ util.writefile(local, merged_text)
+ sys.exit(1 if conflicts else 0)
except ParseError as e:
e = stringutil.forcebytestr(e)
procutil.stdout.write(b"%s: %s\n" % (sys.argv[0].encode('utf8'), e))
--- a/hgext/blackbox.py Fri Feb 18 12:55:39 2022 +0100
+++ b/hgext/blackbox.py Fri Feb 18 14:27:43 2022 +0100
@@ -36,7 +36,7 @@
maxfiles = 3
[blackbox]
- # Include nanoseconds in log entries with %f (see Python function
+ # Include microseconds in log entries with %f (see Python function
# datetime.datetime.strftime)
date-format = %Y-%m-%d @ %H:%M:%S.%f
@@ -101,11 +101,7 @@
b'ignore',
default=lambda: [b'chgserver', b'cmdserver', b'extension'],
)
-configitem(
- b'blackbox',
- b'date-format',
- default=b'%Y/%m/%d %H:%M:%S',
-)
+configitem(b'blackbox', b'date-format', default=b'')
_lastlogger = loggingutil.proxylogger()
@@ -138,7 +134,14 @@
def _log(self, ui, event, msg, opts):
default = ui.configdate(b'devel', b'default-date')
- date = dateutil.datestr(default, ui.config(b'blackbox', b'date-format'))
+ dateformat = ui.config(b'blackbox', b'date-format')
+ if dateformat:
+ date = dateutil.datestr(default, dateformat)
+ else:
+ # We want to display milliseconds (more precision seems
+ # unnecessary). Since %.3f is not supported, use %f and truncate
+ # microseconds.
+ date = dateutil.datestr(default, b'%Y-%m-%d %H:%M:%S.%f')[:-3]
user = procutil.getuser()
pid = b'%d' % procutil.getpid()
changed = b''
@@ -224,8 +227,14 @@
if count >= limit:
break
- # count the commands by matching lines like: 2013/01/23 19:13:36 root>
- if re.match(br'^\d{4}/\d{2}/\d{2} \d{2}:\d{2}:\d{2} .*> .*', line):
+ # count the commands by matching lines like:
+ # 2013/01/23 19:13:36 root>
+ # 2013/01/23 19:13:36 root (1234)>
+ # 2013/01/23 19:13:36 root @0000000000000000000000000000000000000000 (1234)>
+ # 2013-01-23 19:13:36.000 root @0000000000000000000000000000000000000000 (1234)>
+ if re.match(
+        br'^\d{4}[-/]\d{2}[-/]\d{2} \d{2}:\d{2}:\d{2}(\.\d*)? .*> .*', line
+ ):
count += 1
output.append(line)
--- a/hgext/commitextras.py Fri Feb 18 12:55:39 2022 +0100
+++ b/hgext/commitextras.py Fri Feb 18 14:27:43 2022 +0100
@@ -65,23 +65,23 @@
b"unable to parse '%s', should follow "
b"KEY=VALUE format"
)
- raise error.Abort(msg % raw)
+ raise error.InputError(msg % raw)
k, v = raw.split(b'=', 1)
if not k:
msg = _(b"unable to parse '%s', keys can't be empty")
- raise error.Abort(msg % raw)
+ raise error.InputError(msg % raw)
if re.search(br'[^\w-]', k):
msg = _(
b"keys can only contain ascii letters, digits,"
b" '_' and '-'"
)
- raise error.Abort(msg)
+ raise error.InputError(msg)
if k in usedinternally:
msg = _(
b"key '%s' is used internally, can't be set "
b"manually"
)
- raise error.Abort(msg % k)
+ raise error.InputError(msg % k)
inneropts['extra'][k] = v
return super(repoextra, self).commit(*innerpats, **inneropts)
--- a/hgext/convert/hg.py Fri Feb 18 12:55:39 2022 +0100
+++ b/hgext/convert/hg.py Fri Feb 18 14:27:43 2022 +0100
@@ -38,6 +38,7 @@
lock as lockmod,
logcmdutil,
merge as mergemod,
+ mergestate,
phases,
pycompat,
util,
@@ -241,7 +242,7 @@
# If the file requires actual merging, abort. We don't have enough
# context to resolve merges correctly.
- if action in [b'm', b'dm', b'cd', b'dc']:
+ if action in mergestate.CONVERT_MERGE_ACTIONS:
raise error.Abort(
_(
b"unable to convert merge commit "
@@ -250,7 +251,7 @@
)
% (file, p1ctx, p2ctx)
)
- elif action == b'k':
+ elif action == mergestate.ACTION_KEEP:
# 'keep' means nothing changed from p1
continue
else:
--- a/hgext/fix.py Fri Feb 18 12:55:39 2022 +0100
+++ b/hgext/fix.py Fri Feb 18 14:27:43 2022 +0100
@@ -149,7 +149,6 @@
mdiff,
merge,
mergestate as mergestatemod,
- obsolete,
pycompat,
registrar,
rewriteutil,
@@ -463,8 +462,6 @@
revs = set(logcmdutil.revrange(repo, opts[b'rev']))
if opts.get(b'working_dir'):
revs.add(wdirrev)
- for rev in revs:
- checkfixablectx(ui, repo, repo[rev])
# Allow fixing only wdir() even if there's an unfinished operation
if not (len(revs) == 1 and wdirrev in revs):
cmdutil.checkunfinished(repo)
@@ -481,16 +478,6 @@
return revs
-def checkfixablectx(ui, repo, ctx):
- """Aborts if the revision shouldn't be replaced with a fixed one."""
- if ctx.obsolete():
- # It would be better to actually check if the revision has a successor.
- if not obsolete.isenabled(repo, obsolete.allowdivergenceopt):
- raise error.Abort(
- b'fixing obsolete revision could cause divergence'
- )
-
-
def pathstofix(ui, repo, pats, opts, match, basectxs, fixctx):
"""Returns the set of files that should be fixed in a context
--- a/hgext/git/__init__.py Fri Feb 18 12:55:39 2022 +0100
+++ b/hgext/git/__init__.py Fri Feb 18 14:27:43 2022 +0100
@@ -51,6 +51,7 @@
class gitstore(object): # store.basicstore):
def __init__(self, path, vfstype):
self.vfs = vfstype(path)
+ self.opener = self.vfs
self.path = self.vfs.base
self.createmode = store._calcmode(self.vfs)
# above lines should go away in favor of:
--- a/hgext/git/dirstate.py Fri Feb 18 12:55:39 2022 +0100
+++ b/hgext/git/dirstate.py Fri Feb 18 14:27:43 2022 +0100
@@ -257,7 +257,7 @@
if match(p):
yield p
- def set_clean(self, f, parentfiledata=None):
+ def set_clean(self, f, parentfiledata):
"""Mark a file normal and clean."""
# TODO: for now we just let libgit2 re-stat the file. We can
# clearly do better.
--- a/hgext/histedit.py Fri Feb 18 12:55:39 2022 +0100
+++ b/hgext/histedit.py Fri Feb 18 14:27:43 2022 +0100
@@ -667,7 +667,15 @@
repo.ui.setconfig(
b'ui', b'forcemerge', opts.get(b'tool', b''), b'histedit'
)
- stats = mergemod.graft(repo, ctx, labels=[b'local', b'histedit'])
+ stats = mergemod.graft(
+ repo,
+ ctx,
+ labels=[
+ b'already edited',
+ b'current change',
+ b'parent of current change',
+ ],
+ )
finally:
repo.ui.setconfig(b'ui', b'forcemerge', b'', b'histedit')
return stats
@@ -1324,6 +1332,10 @@
d: drop, e: edit, f: fold, m: mess, p: pick, r: roll
pgup/K: move patch up, pgdn/J: move patch down, c: commit, q: abort
"""
+ if self.later_on_top:
+ help += b"Newer commits are shown above older commits.\n"
+ else:
+ help += b"Older commits are shown above newer commits.\n"
return help.splitlines()
def render_help(self, win):
--- a/hgext/keyword.py Fri Feb 18 12:55:39 2022 +0100
+++ b/hgext/keyword.py Fri Feb 18 14:27:43 2022 +0100
@@ -116,6 +116,7 @@
dateutil,
stringutil,
)
+from mercurial.dirstateutils import timestamp
cmdtable = {}
command = registrar.command(cmdtable)
@@ -326,6 +327,7 @@
msg = _(b'overwriting %s expanding keywords\n')
else:
msg = _(b'overwriting %s shrinking keywords\n')
+ wctx = self.repo[None]
for f in candidates:
if self.restrict:
data = self.repo.file(f).read(mf[f])
@@ -356,7 +358,12 @@
fp.write(data)
fp.close()
if kwcmd:
- self.repo.dirstate.set_clean(f)
+ s = wctx[f].lstat()
+ mode = s.st_mode
+ size = s.st_size
+ mtime = timestamp.mtime_of(s)
+ cache_data = (mode, size, mtime)
+ self.repo.dirstate.set_clean(f, cache_data)
elif self.postcommit:
self.repo.dirstate.update_file_p1(f, p1_tracked=True)
--- a/hgext/largefiles/lfutil.py Fri Feb 18 12:55:39 2022 +0100
+++ b/hgext/largefiles/lfutil.py Fri Feb 18 14:27:43 2022 +0100
@@ -32,6 +32,7 @@
vfs as vfsmod,
)
from mercurial.utils import hashutil
+from mercurial.dirstateutils import timestamp
shortname = b'.hglf'
shortnameslash = shortname + b'/'
@@ -243,10 +244,11 @@
def lfdirstatestatus(lfdirstate, repo):
pctx = repo[b'.']
match = matchmod.always()
- unsure, s = lfdirstate.status(
+ unsure, s, mtime_boundary = lfdirstate.status(
match, subrepos=[], ignored=False, clean=False, unknown=False
)
modified, clean = s.modified, s.clean
+ wctx = repo[None]
for lfile in unsure:
try:
fctx = pctx[standin(lfile)]
@@ -256,7 +258,13 @@
modified.append(lfile)
else:
clean.append(lfile)
- lfdirstate.set_clean(lfile)
+ st = wctx[lfile].lstat()
+ mode = st.st_mode
+ size = st.st_size
+ mtime = timestamp.reliable_mtime_of(st, mtime_boundary)
+ if mtime is not None:
+ cache_data = (mode, size, mtime)
+ lfdirstate.set_clean(lfile, cache_data)
return s
@@ -663,7 +671,7 @@
# large.
lfdirstate = openlfdirstate(ui, repo)
dirtymatch = matchmod.always()
- unsure, s = lfdirstate.status(
+ unsure, s, mtime_boundary = lfdirstate.status(
dirtymatch, subrepos=[], ignored=False, clean=False, unknown=False
)
modifiedfiles = unsure + s.modified + s.added + s.removed
--- a/hgext/largefiles/overrides.py Fri Feb 18 12:55:39 2022 +0100
+++ b/hgext/largefiles/overrides.py Fri Feb 18 14:27:43 2022 +0100
@@ -51,11 +51,17 @@
storefactory,
)
+ACTION_ADD = mergestatemod.ACTION_ADD
+ACTION_DELETED_CHANGED = mergestatemod.ACTION_DELETED_CHANGED
+ACTION_GET = mergestatemod.ACTION_GET
+ACTION_KEEP = mergestatemod.ACTION_KEEP
+ACTION_REMOVE = mergestatemod.ACTION_REMOVE
+
eh = exthelper.exthelper()
lfstatus = lfutil.lfstatus
-MERGE_ACTION_LARGEFILE_MARK_REMOVED = b'lfmr'
+MERGE_ACTION_LARGEFILE_MARK_REMOVED = mergestatemod.MergeAction('lfmr')
# -- Utility functions: commonly/repeatedly needed functionality ---------------
@@ -563,8 +569,9 @@
standin = lfutil.standin(lfile)
(lm, largs, lmsg) = mresult.getfile(lfile, (None, None, None))
(sm, sargs, smsg) = mresult.getfile(standin, (None, None, None))
- if sm in (b'g', b'dc') and lm != b'r':
- if sm == b'dc':
+
+ if sm in (ACTION_GET, ACTION_DELETED_CHANGED) and lm != ACTION_REMOVE:
+ if sm == ACTION_DELETED_CHANGED:
f1, f2, fa, move, anc = sargs
sargs = (p2[f2].flags(), False)
# Case 1: normal file in the working copy, largefile in
@@ -578,26 +585,28 @@
% lfile
)
if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
- mresult.addfile(lfile, b'r', None, b'replaced by standin')
- mresult.addfile(standin, b'g', sargs, b'replaces standin')
+ mresult.addfile(
+ lfile, ACTION_REMOVE, None, b'replaced by standin'
+ )
+ mresult.addfile(standin, ACTION_GET, sargs, b'replaces standin')
else: # keep local normal file
- mresult.addfile(lfile, b'k', None, b'replaces standin')
+ mresult.addfile(lfile, ACTION_KEEP, None, b'replaces standin')
if branchmerge:
mresult.addfile(
standin,
- b'k',
+ ACTION_KEEP,
None,
b'replaced by non-standin',
)
else:
mresult.addfile(
standin,
- b'r',
+ ACTION_REMOVE,
None,
b'replaced by non-standin',
)
- elif lm in (b'g', b'dc') and sm != b'r':
- if lm == b'dc':
+ if lm in (ACTION_GET, ACTION_DELETED_CHANGED) and sm != ACTION_REMOVE:
+ if lm == ACTION_DELETED_CHANGED:
f1, f2, fa, move, anc = largs
largs = (p2[f2].flags(), False)
# Case 2: largefile in the working copy, normal file in
@@ -615,11 +624,13 @@
# largefile can be restored from standin safely
mresult.addfile(
lfile,
- b'k',
+ ACTION_KEEP,
None,
b'replaced by standin',
)
- mresult.addfile(standin, b'k', None, b'replaces standin')
+ mresult.addfile(
+ standin, ACTION_KEEP, None, b'replaces standin'
+ )
else:
# "lfile" should be marked as "removed" without
# removal of itself
@@ -631,12 +642,12 @@
)
# linear-merge should treat this largefile as 're-added'
- mresult.addfile(standin, b'a', None, b'keep standin')
+ mresult.addfile(standin, ACTION_ADD, None, b'keep standin')
else: # pick remote normal file
- mresult.addfile(lfile, b'g', largs, b'replaces standin')
+ mresult.addfile(lfile, ACTION_GET, largs, b'replaces standin')
mresult.addfile(
standin,
- b'r',
+ ACTION_REMOVE,
None,
b'replaced by non-standin',
)
@@ -666,14 +677,12 @@
# Override filemerge to prompt the user about how they wish to merge
# largefiles. This will handle identical edits without prompting the user.
-@eh.wrapfunction(filemerge, b'_filemerge')
+@eh.wrapfunction(filemerge, b'filemerge')
def overridefilemerge(
- origfn, premerge, repo, wctx, mynode, orig, fcd, fco, fca, labels=None
+ origfn, repo, wctx, mynode, orig, fcd, fco, fca, labels=None
):
if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
- return origfn(
- premerge, repo, wctx, mynode, orig, fcd, fco, fca, labels=labels
- )
+ return origfn(repo, wctx, mynode, orig, fcd, fco, fca, labels=labels)
ahash = lfutil.readasstandin(fca).lower()
dhash = lfutil.readasstandin(fcd).lower()
@@ -697,7 +706,7 @@
)
):
repo.wwrite(fcd.path(), fco.data(), fco.flags())
- return True, 0, False
+ return 0, False
@eh.wrapfunction(copiesmod, b'pathcopies')
@@ -1519,7 +1528,7 @@
return orig(repo, matcher, prefix, uipathfn, opts)
# Get the list of missing largefiles so we can remove them
lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
- unsure, s = lfdirstate.status(
+ unsure, s, mtime_boundary = lfdirstate.status(
matchmod.always(),
subrepos=[],
ignored=False,
@@ -1746,7 +1755,7 @@
# (*1) deprecated, but used internally (e.g: "rebase --collapse")
lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
- unsure, s = lfdirstate.status(
+ unsure, s, mtime_boundary = lfdirstate.status(
matchmod.always(),
subrepos=[],
ignored=False,
--- a/hgext/largefiles/reposetup.py Fri Feb 18 12:55:39 2022 +0100
+++ b/hgext/largefiles/reposetup.py Fri Feb 18 14:27:43 2022 +0100
@@ -22,6 +22,8 @@
util,
)
+from mercurial.dirstateutils import timestamp
+
from . import (
lfcommands,
lfutil,
@@ -195,7 +197,7 @@
match._files = [f for f in match._files if sfindirstate(f)]
# Don't waste time getting the ignored and unknown
# files from lfdirstate
- unsure, s = lfdirstate.status(
+ unsure, s, mtime_boundary = lfdirstate.status(
match,
subrepos=[],
ignored=False,
@@ -210,6 +212,7 @@
s.clean,
)
if parentworking:
+ wctx = repo[None]
for lfile in unsure:
standin = lfutil.standin(lfile)
if standin not in ctx1:
@@ -222,7 +225,15 @@
else:
if listclean:
clean.append(lfile)
- lfdirstate.set_clean(lfile)
+ s = wctx[lfile].lstat()
+ mode = s.st_mode
+ size = s.st_size
+ mtime = timestamp.reliable_mtime_of(
+ s, mtime_boundary
+ )
+ if mtime is not None:
+ cache_data = (mode, size, mtime)
+ lfdirstate.set_clean(lfile, cache_data)
else:
tocheck = unsure + modified + added + clean
modified, added, clean = [], [], []
@@ -444,11 +455,12 @@
repo.prepushoutgoinghooks.add(b"largefiles", prepushoutgoinghook)
def checkrequireslfiles(ui, repo, **kwargs):
- if b'largefiles' not in repo.requirements and any(
- lfutil.shortname + b'/' in f[1] for f in repo.store.datafiles()
- ):
- repo.requirements.add(b'largefiles')
- scmutil.writereporequirements(repo)
+ with repo.lock():
+ if b'largefiles' not in repo.requirements and any(
+ lfutil.shortname + b'/' in f[1] for f in repo.store.datafiles()
+ ):
+ repo.requirements.add(b'largefiles')
+ scmutil.writereporequirements(repo)
ui.setconfig(
b'hooks', b'changegroup.lfiles', checkrequireslfiles, b'largefiles'
--- a/hgext/lfs/__init__.py Fri Feb 18 12:55:39 2022 +0100
+++ b/hgext/lfs/__init__.py Fri Feb 18 14:27:43 2022 +0100
@@ -257,25 +257,28 @@
if b'lfs' not in repo.requirements:
def checkrequireslfs(ui, repo, **kwargs):
- if b'lfs' in repo.requirements:
- return 0
+ with repo.lock():
+ if b'lfs' in repo.requirements:
+ return 0
- last = kwargs.get('node_last')
- if last:
- s = repo.set(b'%n:%n', bin(kwargs['node']), bin(last))
- else:
- s = repo.set(b'%n', bin(kwargs['node']))
- match = repo._storenarrowmatch
- for ctx in s:
- # TODO: is there a way to just walk the files in the commit?
- if any(
- ctx[f].islfs() for f in ctx.files() if f in ctx and match(f)
- ):
- repo.requirements.add(b'lfs')
- repo.features.add(repository.REPO_FEATURE_LFS)
- scmutil.writereporequirements(repo)
- repo.prepushoutgoinghooks.add(b'lfs', wrapper.prepush)
- break
+ last = kwargs.get('node_last')
+ if last:
+ s = repo.set(b'%n:%n', bin(kwargs['node']), bin(last))
+ else:
+ s = repo.set(b'%n', bin(kwargs['node']))
+ match = repo._storenarrowmatch
+ for ctx in s:
+ # TODO: is there a way to just walk the files in the commit?
+ if any(
+ ctx[f].islfs()
+ for f in ctx.files()
+ if f in ctx and match(f)
+ ):
+ repo.requirements.add(b'lfs')
+ repo.features.add(repository.REPO_FEATURE_LFS)
+ scmutil.writereporequirements(repo)
+ repo.prepushoutgoinghooks.add(b'lfs', wrapper.prepush)
+ break
ui.setconfig(b'hooks', b'commit.lfs', checkrequireslfs, b'lfs')
ui.setconfig(
--- a/hgext/narrow/narrowdirstate.py Fri Feb 18 12:55:39 2022 +0100
+++ b/hgext/narrow/narrowdirstate.py Fri Feb 18 14:27:43 2022 +0100
@@ -38,8 +38,8 @@
return super(narrowdirstate, self).normal(*args, **kwargs)
@_editfunc
- def set_tracked(self, *args):
- return super(narrowdirstate, self).set_tracked(*args)
+ def set_tracked(self, *args, **kwargs):
+ return super(narrowdirstate, self).set_tracked(*args, **kwargs)
@_editfunc
def set_untracked(self, *args):
--- a/hgext/notify.py Fri Feb 18 12:55:39 2022 +0100
+++ b/hgext/notify.py Fri Feb 18 14:27:43 2022 +0100
@@ -435,7 +435,10 @@
if spec is None:
subs.add(sub)
continue
- revs = self.repo.revs(b'%r and %d:', spec, ctx.rev())
+ try:
+ revs = self.repo.revs(b'%r and %d:', spec, ctx.rev())
+ except error.RepoLookupError:
+ continue
if len(revs):
subs.add(sub)
continue
--- a/hgext/rebase.py Fri Feb 18 12:55:39 2022 +0100
+++ b/hgext/rebase.py Fri Feb 18 14:27:43 2022 +0100
@@ -1544,7 +1544,7 @@
force=True,
ancestor=base,
mergeancestor=mergeancestor,
- labels=[b'dest', b'source'],
+ labels=[b'dest', b'source', b'parent of source'],
wc=wctx,
)
wctx.setparents(p1ctx.node(), repo[p2].node())
--- a/hgext/remotefilelog/README.md Fri Feb 18 12:55:39 2022 +0100
+++ b/hgext/remotefilelog/README.md Fri Feb 18 14:27:43 2022 +0100
@@ -88,7 +88,9 @@
4. Tags are not supported in completely shallow repos. If you use tags in your repo you will have to specify `excludepattern=.hgtags` in your client configuration to ensure that file is downloaded. The include/excludepattern settings are experimental at the moment and have yet to be deployed in a production environment.
-5. A few commands will be slower. `hg log <filename>` will be much slower since it has to walk the entire commit history instead of just the filelog. Use `hg log -f <filename>` instead, which remains very fast.
+5. Similarly, subrepositories should not be used with completely shallow repos. Use `excludepattern=.hgsub*` in your client configuration to ensure that the files are downloaded.
+
+6. A few commands will be slower. `hg log <filename>` will be much slower since it has to walk the entire commit history instead of just the filelog. Use `hg log -f <filename>` instead, which remains very fast.
Contributing
============
--- a/hgext/remotefilelog/__init__.py Fri Feb 18 12:55:39 2022 +0100
+++ b/hgext/remotefilelog/__init__.py Fri Feb 18 14:27:43 2022 +0100
@@ -520,7 +520,7 @@
# Prefetch files before status attempts to look at their size and contents
-def checklookup(orig, self, files):
+def checklookup(orig, self, files, mtime_boundary):
repo = self._repo
if isenabled(repo):
prefetchfiles = []
@@ -530,7 +530,7 @@
prefetchfiles.append((f, hex(parent.filenode(f))))
# batch fetch the needed files from the server
repo.fileservice.prefetch(prefetchfiles)
- return orig(self, files)
+ return orig(self, files, mtime_boundary)
# Prefetch the logic that compares added and removed files for renames
--- a/hgext/remotefilelog/remotefilelog.py Fri Feb 18 12:55:39 2022 +0100
+++ b/hgext/remotefilelog/remotefilelog.py Fri Feb 18 14:27:43 2022 +0100
@@ -18,7 +18,6 @@
mdiff,
pycompat,
revlog,
- util,
)
from mercurial.utils import storageutil
from mercurial.revlogutils import flagutil
@@ -245,11 +244,11 @@
__bool__ = __nonzero__
def __len__(self):
- if self.filename == b'.hgtags':
- # The length of .hgtags is used to fast path tag checking.
- # remotefilelog doesn't support .hgtags since the entire .hgtags
- # history is needed. Use the excludepattern setting to make
- # .hgtags a normal filelog.
+ if self.filename in (b'.hgtags', b'.hgsub', b'.hgsubstate'):
+ # Global tag and subrepository support require access to the
+ # file history for various performance sensitive operations.
+ # excludepattern should be used for repositories depending on
+ # those features to fallback to regular filelog.
return 0
raise RuntimeError(b"len not supported")
@@ -360,17 +359,6 @@
)
return rev
- def _processflags(self, text, flags, operation, raw=False):
- """deprecated entry point to access flag processors"""
- msg = b'_processflag(...) use the specialized variant'
- util.nouideprecwarn(msg, b'5.2', stacklevel=2)
- if raw:
- return text, flagutil.processflagsraw(self, text, flags)
- elif operation == b'read':
- return flagutil.processflagsread(self, text, flags)
- else: # write operation
- return flagutil.processflagswrite(self, text, flags)
-
def revision(self, node, raw=False):
"""returns the revlog contents at this node.
this includes the meta data traditionally included in file revlogs.
--- a/hgext/sparse.py Fri Feb 18 12:55:39 2022 +0100
+++ b/hgext/sparse.py Fri Feb 18 14:27:43 2022 +0100
@@ -76,6 +76,7 @@
from mercurial.i18n import _
from mercurial.pycompat import setattr
from mercurial import (
+ cmdutil,
commands,
dirstate,
error,
@@ -153,22 +154,11 @@
def _clonesparsecmd(orig, ui, repo, *args, **opts):
- include_pat = opts.get('include')
- exclude_pat = opts.get('exclude')
- enableprofile_pat = opts.get('enable_profile')
+ include = opts.get('include')
+ exclude = opts.get('exclude')
+ enableprofile = opts.get('enable_profile')
narrow_pat = opts.get('narrow')
- include = exclude = enableprofile = False
- if include_pat:
- pat = include_pat
- include = True
- if exclude_pat:
- pat = exclude_pat
- exclude = True
- if enableprofile_pat:
- pat = enableprofile_pat
- enableprofile = True
- if sum([include, exclude, enableprofile]) > 1:
- raise error.Abort(_(b"too many flags specified."))
+
# if --narrow is passed, it means they are includes and excludes for narrow
# clone
if not narrow_pat and (include or exclude or enableprofile):
@@ -176,7 +166,6 @@
def clonesparse(orig, ctx, *args, **kwargs):
sparse.updateconfig(
ctx.repo().unfiltered(),
- pat,
{},
include=include,
exclude=exclude,
@@ -214,7 +203,7 @@
for pat in pats:
dirname, basename = util.split(pat)
dirs.add(dirname)
- sparse.updateconfig(repo, list(dirs), opts, include=True)
+ sparse.updateconfig(repo, opts, include=list(dirs))
return orig(ui, repo, *pats, **opts)
extensions.wrapcommand(commands.table, b'add', _add)
@@ -286,18 +275,54 @@
@command(
b'debugsparse',
[
- (b'I', b'include', False, _(b'include files in the sparse checkout')),
- (b'X', b'exclude', False, _(b'exclude files in the sparse checkout')),
- (b'd', b'delete', False, _(b'delete an include/exclude rule')),
+ (
+ b'I',
+ b'include',
+ [],
+ _(b'include files in the sparse checkout'),
+ _(b'PATTERN'),
+ ),
+ (
+ b'X',
+ b'exclude',
+ [],
+ _(b'exclude files in the sparse checkout'),
+ _(b'PATTERN'),
+ ),
+ (
+ b'd',
+ b'delete',
+ [],
+ _(b'delete an include/exclude rule'),
+ _(b'PATTERN'),
+ ),
(
b'f',
b'force',
False,
_(b'allow changing rules even with pending changes'),
),
- (b'', b'enable-profile', False, _(b'enables the specified profile')),
- (b'', b'disable-profile', False, _(b'disables the specified profile')),
- (b'', b'import-rules', False, _(b'imports rules from a file')),
+ (
+ b'',
+ b'enable-profile',
+ [],
+ _(b'enables the specified profile'),
+ _(b'PATTERN'),
+ ),
+ (
+ b'',
+ b'disable-profile',
+ [],
+ _(b'disables the specified profile'),
+ _(b'PATTERN'),
+ ),
+ (
+ b'',
+ b'import-rules',
+ [],
+ _(b'imports rules from a file'),
+ _(b'PATTERN'),
+ ),
(b'', b'clear-rules', False, _(b'clears local include/exclude rules')),
(
b'',
@@ -308,10 +333,10 @@
(b'', b'reset', False, _(b'makes the repo full again')),
]
+ commands.templateopts,
- _(b'[--OPTION] PATTERN...'),
+ _(b'[--OPTION]'),
helpbasic=True,
)
-def debugsparse(ui, repo, *pats, **opts):
+def debugsparse(ui, repo, **opts):
"""make the current checkout sparse, or edit the existing checkout
The sparse command is used to make the current checkout sparse.
@@ -363,19 +388,13 @@
delete = opts.get(b'delete')
refresh = opts.get(b'refresh')
reset = opts.get(b'reset')
- count = sum(
- [
- include,
- exclude,
- enableprofile,
- disableprofile,
- delete,
- importrules,
- refresh,
- clearrules,
- reset,
- ]
+ action = cmdutil.check_at_most_one_arg(
+ opts, b'import_rules', b'clear_rules', b'refresh'
)
+ updateconfig = bool(
+ include or exclude or delete or reset or enableprofile or disableprofile
+ )
+ count = sum([updateconfig, bool(action)])
if count > 1:
raise error.Abort(_(b"too many flags specified"))
@@ -397,10 +416,9 @@
)
)
- if include or exclude or delete or reset or enableprofile or disableprofile:
+ if updateconfig:
sparse.updateconfig(
repo,
- pats,
opts,
include=include,
exclude=exclude,
@@ -412,7 +430,7 @@
)
if importrules:
- sparse.importfromfiles(repo, opts, pats, force=force)
+ sparse.importfromfiles(repo, opts, importrules, force=force)
if clearrules:
sparse.clearrules(repo, force=force)
--- a/hgext/win32text.py Fri Feb 18 12:55:39 2022 +0100
+++ b/hgext/win32text.py Fri Feb 18 14:27:43 2022 +0100
@@ -47,6 +47,8 @@
from mercurial.i18n import _
from mercurial.node import short
from mercurial import (
+ cmdutil,
+ extensions,
pycompat,
registrar,
)
@@ -215,6 +217,23 @@
repo.adddatafilter(name, fn)
+def wrap_revert(orig, repo, ctx, names, uipathfn, actions, *args, **kwargs):
+ # reset dirstate cache for file we touch
+ ds = repo.dirstate
+ with ds.parentchange():
+ for filename in actions[b'revert'][0]:
+ entry = ds.get_entry(filename)
+ if entry is not None:
+ if entry.p1_tracked:
+ ds.update_file(
+ filename,
+ entry.tracked,
+ p1_tracked=True,
+ p2_info=entry.p2_info,
+ )
+ return orig(repo, ctx, names, uipathfn, actions, *args, **kwargs)
+
+
def extsetup(ui):
# deprecated config: win32text.warn
if ui.configbool(b'win32text', b'warn'):
@@ -224,3 +243,4 @@
b"https://mercurial-scm.org/wiki/Win32TextExtension\n"
)
)
+ extensions.wrapfunction(cmdutil, '_performrevert', wrap_revert)
--- a/mercurial/bookmarks.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/bookmarks.py Fri Feb 18 14:27:43 2022 +0100
@@ -22,6 +22,7 @@
error,
obsutil,
pycompat,
+ requirements,
scmutil,
txnutil,
util,
@@ -36,11 +37,9 @@
# custom styles
activebookmarklabel = b'bookmarks.active bookmarks.current'
-BOOKMARKS_IN_STORE_REQUIREMENT = b'bookmarksinstore'
-
def bookmarksinstore(repo):
- return BOOKMARKS_IN_STORE_REQUIREMENT in repo.requirements
+ return requirements.BOOKMARKS_IN_STORE_REQUIREMENT in repo.requirements
def bookmarksvfs(repo):
@@ -213,7 +212,11 @@
The transaction is then responsible for updating the file content."""
location = b'' if bookmarksinstore(self._repo) else b'plain'
tr.addfilegenerator(
- b'bookmarks', (b'bookmarks',), self._write, location=location
+ b'bookmarks',
+ (b'bookmarks',),
+ self._write,
+ location=location,
+ post_finalize=True,
)
tr.hookargs[b'bookmark_moved'] = b'1'
--- a/mercurial/branchmap.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/branchmap.py Fri Feb 18 14:27:43 2022 +0100
@@ -17,6 +17,7 @@
from . import (
encoding,
error,
+ obsolete,
pycompat,
scmutil,
util,
@@ -184,7 +185,7 @@
The first line is used to check if the cache is still valid. If the
branch cache is for a filtered repo view, an optional third hash is
- included that hashes the hashes of all filtered revisions.
+ included that hashes the hashes of all filtered and obsolete revisions.
The open/closed state is represented by a single letter 'o' or 'c'.
This field can be used to avoid changelog reads when determining if a
@@ -351,16 +352,25 @@
return filename
def validfor(self, repo):
- """Is the cache content valid regarding a repo
+ """check that cache contents are valid for (a subset of) this repo
- - False when cached tipnode is unknown or if we detect a strip.
- - True when cache is up to date or a subset of current repo."""
+ - False when the order of changesets changed or if we detect a strip.
+ - True when cache is up-to-date for the current repo or its subset."""
try:
- return (self.tipnode == repo.changelog.node(self.tiprev)) and (
- self.filteredhash == scmutil.filteredhash(repo, self.tiprev)
- )
+ node = repo.changelog.node(self.tiprev)
except IndexError:
+ # changesets were stripped and now we don't even have enough to
+ # find tiprev
return False
+ if self.tipnode != node:
+ # tiprev doesn't correspond to tipnode: repo was stripped, or this
+ # repo has a different order of changesets
+ return False
+ tiphash = scmutil.filteredhash(repo, self.tiprev, needobsolete=True)
+ # hashes don't match if this repo view has a different set of filtered
+ # revisions (e.g. due to phase changes) or obsolete revisions (e.g.
+ # history was rewritten)
+ return self.filteredhash == tiphash
def _branchtip(self, heads):
"""Return tuple with last open head in heads and false,
@@ -478,6 +488,9 @@
# use the faster unfiltered parent accessor.
parentrevs = repo.unfiltered().changelog.parentrevs
+ # Faster than using ctx.obsolete()
+ obsrevs = obsolete.getrevs(repo, b'obsolete')
+
for branch, newheadrevs in pycompat.iteritems(newbranches):
# For every branch, compute the new branchheads.
# A branchhead is a revision such that no descendant is on
@@ -514,10 +527,15 @@
# checks can be skipped. Otherwise, the ancestors of the
# "uncertain" set are removed from branchheads.
# This computation is heavy and avoided if at all possible.
- bheads = self._entries.setdefault(branch, [])
+ bheads = self._entries.get(branch, [])
bheadset = {cl.rev(node) for node in bheads}
uncertain = set()
for newrev in sorted(newheadrevs):
+ if newrev in obsrevs:
+ # We ignore obsolete changesets as they shouldn't be
+ # considered heads.
+ continue
+
if not bheadset:
bheadset.add(newrev)
continue
@@ -525,13 +543,22 @@
parents = [p for p in parentrevs(newrev) if p != nullrev]
samebranch = set()
otherbranch = set()
+ obsparents = set()
for p in parents:
- if p in bheadset or getbranchinfo(p)[0] == branch:
+ if p in obsrevs:
+ # We ignored this obsolete changeset earlier, but now
+ # that it has non-ignored children, we need to make
+ # sure their ancestors are not considered heads. To
+ # achieve that, we will simply treat this obsolete
+ # changeset as a parent from other branch.
+ obsparents.add(p)
+ elif p in bheadset or getbranchinfo(p)[0] == branch:
samebranch.add(p)
else:
otherbranch.add(p)
- if otherbranch and not (len(bheadset) == len(samebranch) == 1):
+ if not (len(bheadset) == len(samebranch) == 1):
uncertain.update(otherbranch)
+ uncertain.update(obsparents)
bheadset.difference_update(samebranch)
bheadset.add(newrev)
@@ -540,11 +567,12 @@
topoheads = set(cl.headrevs())
if bheadset - topoheads:
floorrev = min(bheadset)
- ancestors = set(cl.ancestors(newheadrevs, floorrev))
- bheadset -= ancestors
- bheadrevs = sorted(bheadset)
- self[branch] = [cl.node(rev) for rev in bheadrevs]
- tiprev = bheadrevs[-1]
+ if floorrev <= max(uncertain):
+ ancestors = set(cl.ancestors(uncertain, floorrev))
+ bheadset -= ancestors
+ if bheadset:
+ self[branch] = [cl.node(rev) for rev in sorted(bheadset)]
+ tiprev = max(newheadrevs)
if tiprev > ntiprev:
ntiprev = tiprev
@@ -553,15 +581,24 @@
self.tipnode = cl.node(ntiprev)
if not self.validfor(repo):
- # cache key are not valid anymore
+ # old cache key is now invalid for the repo, but we've just updated
+ # the cache and we assume it's valid, so let's make the cache key
+ # valid as well by recomputing it from the cached data
self.tipnode = repo.nullid
self.tiprev = nullrev
for heads in self.iterheads():
+ if not heads:
+ # all revisions on a branch are obsolete
+ continue
+ # note: tiprev is not necessarily the tip revision of repo,
+ # because the tip could be obsolete (i.e. not a head)
tiprev = max(cl.rev(node) for node in heads)
if tiprev > self.tiprev:
self.tipnode = cl.node(tiprev)
self.tiprev = tiprev
- self.filteredhash = scmutil.filteredhash(repo, self.tiprev)
+ self.filteredhash = scmutil.filteredhash(
+ repo, self.tiprev, needobsolete=True
+ )
duration = util.timer() - starttime
repo.ui.log(
--- a/mercurial/bundle2.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/bundle2.py Fri Feb 18 14:27:43 2022 +0100
@@ -1886,7 +1886,8 @@
filecount, bytecount, it = streamclone.generatev2(
repo, includepats, excludepats, includeobsmarkers
)
- requirements = _formatrequirementsspec(repo.requirements)
+ requirements = streamclone.streamed_requirements(repo)
+ requirements = _formatrequirementsspec(requirements)
part = bundler.newpart(b'stream2', data=it)
part.addparam(b'bytecount', b'%d' % bytecount, mandatory=True)
part.addparam(b'filecount', b'%d' % filecount, mandatory=True)
@@ -2419,7 +2420,7 @@
op.records.add(b'bookmarks', record)
else:
raise error.ProgrammingError(
- b'unkown bookmark mode: %s' % bookmarksmode
+ b'unknown bookmark mode: %s' % bookmarksmode
)
--- a/mercurial/bundlecaches.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/bundlecaches.py Fri Feb 18 14:27:43 2022 +0100
@@ -195,7 +195,7 @@
# repo supports and error if the bundle isn't compatible.
if version == b'packed1' and b'requirements' in params:
requirements = set(params[b'requirements'].split(b','))
- missingreqs = requirements - repo.supportedformats
+ missingreqs = requirements - requirementsmod.STREAM_FIXED_REQUIREMENTS
if missingreqs:
raise error.UnsupportedBundleSpecification(
_(b'missing support for repository features: %s')
--- a/mercurial/cext/parsers.c Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/cext/parsers.c Fri Feb 18 14:27:43 2022 +0100
@@ -61,11 +61,13 @@
int p2_info;
int has_meaningful_data;
int has_meaningful_mtime;
+ int mtime_second_ambiguous;
int mode;
int size;
int mtime_s;
int mtime_ns;
PyObject *parentfiledata;
+ PyObject *mtime;
PyObject *fallback_exec;
PyObject *fallback_symlink;
static char *keywords_name[] = {
@@ -78,6 +80,7 @@
p2_info = 0;
has_meaningful_mtime = 1;
has_meaningful_data = 1;
+ mtime_second_ambiguous = 0;
parentfiledata = Py_None;
fallback_exec = Py_None;
fallback_symlink = Py_None;
@@ -118,10 +121,18 @@
}
if (parentfiledata != Py_None) {
- if (!PyArg_ParseTuple(parentfiledata, "ii(ii)", &mode, &size,
- &mtime_s, &mtime_ns)) {
+ if (!PyArg_ParseTuple(parentfiledata, "iiO", &mode, &size,
+ &mtime)) {
return NULL;
}
+ if (mtime != Py_None) {
+ if (!PyArg_ParseTuple(mtime, "iii", &mtime_s, &mtime_ns,
+ &mtime_second_ambiguous)) {
+ return NULL;
+ }
+ } else {
+ has_meaningful_mtime = 0;
+ }
} else {
has_meaningful_data = 0;
has_meaningful_mtime = 0;
@@ -130,6 +141,9 @@
t->flags |= dirstate_flag_has_meaningful_data;
t->mode = mode;
t->size = size;
+ if (mtime_second_ambiguous) {
+ t->flags |= dirstate_flag_mtime_second_ambiguous;
+ }
} else {
t->mode = 0;
t->size = 0;
@@ -255,7 +269,8 @@
} else if (!(self->flags & dirstate_flag_has_mtime) ||
!(self->flags & dirstate_flag_p1_tracked) ||
!(self->flags & dirstate_flag_wc_tracked) ||
- (self->flags & dirstate_flag_p2_info)) {
+ (self->flags & dirstate_flag_p2_info) ||
+ (self->flags & dirstate_flag_mtime_second_ambiguous)) {
return ambiguous_time;
} else {
return self->mtime_s;
@@ -311,33 +326,30 @@
return PyInt_FromLong(dirstate_item_c_v1_mtime(self));
};
-static PyObject *dirstate_item_need_delay(dirstateItemObject *self,
- PyObject *now)
-{
- int now_s;
- int now_ns;
- if (!PyArg_ParseTuple(now, "ii", &now_s, &now_ns)) {
- return NULL;
- }
- if (dirstate_item_c_v1_state(self) == 'n' && self->mtime_s == now_s) {
- Py_RETURN_TRUE;
- } else {
- Py_RETURN_FALSE;
- }
-};
-
static PyObject *dirstate_item_mtime_likely_equal_to(dirstateItemObject *self,
PyObject *other)
{
int other_s;
int other_ns;
- if (!PyArg_ParseTuple(other, "ii", &other_s, &other_ns)) {
+ int other_second_ambiguous;
+ if (!PyArg_ParseTuple(other, "iii", &other_s, &other_ns,
+ &other_second_ambiguous)) {
return NULL;
}
- if ((self->flags & dirstate_flag_has_mtime) &&
- self->mtime_s == other_s &&
- (self->mtime_ns == other_ns || self->mtime_ns == 0 ||
- other_ns == 0)) {
+ if (!(self->flags & dirstate_flag_has_mtime)) {
+ Py_RETURN_FALSE;
+ }
+ if (self->mtime_s != other_s) {
+ Py_RETURN_FALSE;
+ }
+ if (self->mtime_ns == 0 || other_ns == 0) {
+ if (self->flags & dirstate_flag_mtime_second_ambiguous) {
+ Py_RETURN_FALSE;
+ } else {
+ Py_RETURN_TRUE;
+ }
+ }
+ if (self->mtime_ns == other_ns) {
Py_RETURN_TRUE;
} else {
Py_RETURN_FALSE;
@@ -438,14 +450,6 @@
dirstate_flag_has_meaningful_data |
dirstate_flag_has_mtime);
}
- if (t->flags & dirstate_flag_mtime_second_ambiguous) {
- /* The current code is not able to do the more subtle comparison
- * that the MTIME_SECOND_AMBIGUOUS requires. So we ignore the
- * mtime */
- t->flags &= ~(dirstate_flag_mtime_second_ambiguous |
- dirstate_flag_has_meaningful_data |
- dirstate_flag_has_mtime);
- }
t->mode = 0;
if (t->flags & dirstate_flag_has_meaningful_data) {
if (t->flags & dirstate_flag_mode_exec_perm) {
@@ -474,14 +478,28 @@
static PyObject *dirstate_item_set_clean(dirstateItemObject *self,
PyObject *args)
{
- int size, mode, mtime_s, mtime_ns;
- if (!PyArg_ParseTuple(args, "ii(ii)", &mode, &size, &mtime_s,
- &mtime_ns)) {
+ int size, mode, mtime_s, mtime_ns, mtime_second_ambiguous;
+ PyObject *mtime;
+ mtime_s = 0;
+ mtime_ns = 0;
+ mtime_second_ambiguous = 0;
+ if (!PyArg_ParseTuple(args, "iiO", &mode, &size, &mtime)) {
return NULL;
}
+ if (mtime != Py_None) {
+ if (!PyArg_ParseTuple(mtime, "iii", &mtime_s, &mtime_ns,
+ &mtime_second_ambiguous)) {
+ return NULL;
+ }
+ } else {
+ self->flags &= ~dirstate_flag_has_mtime;
+ }
self->flags = dirstate_flag_wc_tracked | dirstate_flag_p1_tracked |
dirstate_flag_has_meaningful_data |
dirstate_flag_has_mtime;
+ if (mtime_second_ambiguous) {
+ self->flags |= dirstate_flag_mtime_second_ambiguous;
+ }
self->mode = mode;
self->size = size;
self->mtime_s = mtime_s;
@@ -530,8 +548,6 @@
"return a \"size\" suitable for v1 serialization"},
{"v1_mtime", (PyCFunction)dirstate_item_v1_mtime, METH_NOARGS,
"return a \"mtime\" suitable for v1 serialization"},
- {"need_delay", (PyCFunction)dirstate_item_need_delay, METH_O,
- "True if the stored mtime would be ambiguous with the current time"},
{"mtime_likely_equal_to", (PyCFunction)dirstate_item_mtime_likely_equal_to,
METH_O, "True if the stored mtime is likely equal to the given mtime"},
{"from_v1_data", (PyCFunction)dirstate_item_from_v1_meth,
@@ -904,12 +920,9 @@
Py_ssize_t nbytes, pos, l;
PyObject *k, *v = NULL, *pn;
char *p, *s;
- int now_s;
- int now_ns;
- if (!PyArg_ParseTuple(args, "O!O!O!(ii):pack_dirstate", &PyDict_Type,
- &map, &PyDict_Type, ©map, &PyTuple_Type, &pl,
- &now_s, &now_ns)) {
+ if (!PyArg_ParseTuple(args, "O!O!O!:pack_dirstate", &PyDict_Type, &map,
+ &PyDict_Type, ©map, &PyTuple_Type, &pl)) {
return NULL;
}
@@ -978,21 +991,6 @@
mode = dirstate_item_c_v1_mode(tuple);
size = dirstate_item_c_v1_size(tuple);
mtime = dirstate_item_c_v1_mtime(tuple);
- if (state == 'n' && tuple->mtime_s == now_s) {
- /* See pure/parsers.py:pack_dirstate for why we do
- * this. */
- mtime = -1;
- mtime_unset = (PyObject *)dirstate_item_from_v1_data(
- state, mode, size, mtime);
- if (!mtime_unset) {
- goto bail;
- }
- if (PyDict_SetItem(map, k, mtime_unset) == -1) {
- goto bail;
- }
- Py_DECREF(mtime_unset);
- mtime_unset = NULL;
- }
*p++ = state;
putbe32((uint32_t)mode, p);
putbe32((uint32_t)size, p + 4);
--- a/mercurial/cext/revlog.c Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/cext/revlog.c Fri Feb 18 14:27:43 2022 +0100
@@ -103,8 +103,7 @@
*/
long rust_ext_compat; /* compatibility with being used in rust
extensions */
- char format_version; /* size of index headers. Differs in v1 v.s. v2
- format */
+ long format_version; /* format version selector (format_*) */
};
static Py_ssize_t index_length(const indexObject *self)
@@ -120,9 +119,11 @@
static int index_find_node(indexObject *self, const char *node);
#if LONG_MAX == 0x7fffffffL
-static const char *const tuple_format = PY23("Kiiiiiis#KiBB", "Kiiiiiiy#KiBB");
+static const char *const tuple_format =
+ PY23("Kiiiiiis#KiBBi", "Kiiiiiiy#KiBBi");
#else
-static const char *const tuple_format = PY23("kiiiiiis#kiBB", "kiiiiiiy#kiBB");
+static const char *const tuple_format =
+ PY23("kiiiiiis#kiBBi", "kiiiiiiy#kiBBi");
#endif
/* A RevlogNG v1 index entry is 64 bytes long. */
@@ -131,10 +132,54 @@
/* A Revlogv2 index entry is 96 bytes long. */
static const long v2_entry_size = 96;
-static const long format_v1 = 1; /* Internal only, could be any number */
-static const long format_v2 = 2; /* Internal only, could be any number */
+/* A Changelogv2 index entry is 96 bytes long. */
+static const long cl2_entry_size = 96;
+
+/* Internal format version.
+ * Must match their counterparts in revlogutils/constants.py */
+static const long format_v1 = 1; /* constants.py: REVLOGV1 */
+static const long format_v2 = 0xDEAD; /* constants.py: REVLOGV2 */
+static const long format_cl2 = 0xD34D; /* constants.py: CHANGELOGV2 */
+
+static const long entry_v1_offset_high = 0;
+static const long entry_v1_offset_offset_flags = 4;
+static const long entry_v1_offset_comp_len = 8;
+static const long entry_v1_offset_uncomp_len = 12;
+static const long entry_v1_offset_base_rev = 16;
+static const long entry_v1_offset_link_rev = 20;
+static const long entry_v1_offset_parent_1 = 24;
+static const long entry_v1_offset_parent_2 = 28;
+static const long entry_v1_offset_node_id = 32;
+
+static const long entry_v2_offset_high = 0;
+static const long entry_v2_offset_offset_flags = 4;
+static const long entry_v2_offset_comp_len = 8;
+static const long entry_v2_offset_uncomp_len = 12;
+static const long entry_v2_offset_base_rev = 16;
+static const long entry_v2_offset_link_rev = 20;
+static const long entry_v2_offset_parent_1 = 24;
+static const long entry_v2_offset_parent_2 = 28;
+static const long entry_v2_offset_node_id = 32;
+static const long entry_v2_offset_sidedata_offset = 64;
+static const long entry_v2_offset_sidedata_comp_len = 72;
+static const long entry_v2_offset_all_comp_mode = 76;
+/* next free offset: 77 */
+
+static const long entry_cl2_offset_high = 0;
+static const long entry_cl2_offset_offset_flags = 4;
+static const long entry_cl2_offset_comp_len = 8;
+static const long entry_cl2_offset_uncomp_len = 12;
+static const long entry_cl2_offset_parent_1 = 16;
+static const long entry_cl2_offset_parent_2 = 20;
+static const long entry_cl2_offset_node_id = 24;
+static const long entry_cl2_offset_sidedata_offset = 56;
+static const long entry_cl2_offset_sidedata_comp_len = 64;
+static const long entry_cl2_offset_all_comp_mode = 68;
+static const long entry_cl2_offset_rank = 69;
+/* next free offset: 73 */
static const char comp_mode_inline = 2;
+static const char rank_unknown = -1;
static void raise_revlog_error(void)
{
@@ -203,8 +248,19 @@
{
const char *data = index_deref(self, rev);
- ps[0] = getbe32(data + 24);
- ps[1] = getbe32(data + 28);
+ if (self->format_version == format_v1) {
+ ps[0] = getbe32(data + entry_v1_offset_parent_1);
+ ps[1] = getbe32(data + entry_v1_offset_parent_2);
+ } else if (self->format_version == format_v2) {
+ ps[0] = getbe32(data + entry_v2_offset_parent_1);
+ ps[1] = getbe32(data + entry_v2_offset_parent_2);
+ } else if (self->format_version == format_cl2) {
+ ps[0] = getbe32(data + entry_cl2_offset_parent_1);
+ ps[1] = getbe32(data + entry_cl2_offset_parent_2);
+ } else {
+ raise_revlog_error();
+ return -1;
+ }
/* If index file is corrupted, ps[] may point to invalid revisions. So
* there is a risk of buffer overflow to trust them unconditionally. */
@@ -251,14 +307,36 @@
return 0;
data = index_deref(self, rev);
- offset = getbe32(data + 4);
- if (rev == 0) {
- /* mask out version number for the first entry */
- offset &= 0xFFFF;
+
+ if (self->format_version == format_v1) {
+ offset = getbe32(data + entry_v1_offset_offset_flags);
+ if (rev == 0) {
+ /* mask out version number for the first entry */
+ offset &= 0xFFFF;
+ } else {
+ uint32_t offset_high =
+ getbe32(data + entry_v1_offset_high);
+ offset |= ((uint64_t)offset_high) << 32;
+ }
+ } else if (self->format_version == format_v2) {
+ offset = getbe32(data + entry_v2_offset_offset_flags);
+ if (rev == 0) {
+ /* mask out version number for the first entry */
+ offset &= 0xFFFF;
+ } else {
+ uint32_t offset_high =
+ getbe32(data + entry_v2_offset_high);
+ offset |= ((uint64_t)offset_high) << 32;
+ }
+ } else if (self->format_version == format_cl2) {
+ uint32_t offset_high = getbe32(data + entry_cl2_offset_high);
+ offset = getbe32(data + entry_cl2_offset_offset_flags);
+ offset |= ((uint64_t)offset_high) << 32;
} else {
- uint32_t offset_high = getbe32(data);
- offset |= ((uint64_t)offset_high) << 32;
+ raise_revlog_error();
+ return -1;
}
+
return (int64_t)(offset >> 16);
}
@@ -272,7 +350,16 @@
data = index_deref(self, rev);
- tmp = (int)getbe32(data + 8);
+ if (self->format_version == format_v1) {
+ tmp = (int)getbe32(data + entry_v1_offset_comp_len);
+ } else if (self->format_version == format_v2) {
+ tmp = (int)getbe32(data + entry_v2_offset_comp_len);
+ } else if (self->format_version == format_cl2) {
+ tmp = (int)getbe32(data + entry_cl2_offset_comp_len);
+ } else {
+ raise_revlog_error();
+ return -1;
+ }
if (tmp < 0) {
PyErr_Format(PyExc_OverflowError,
"revlog entry size out of bound (%d)", tmp);
@@ -297,7 +384,7 @@
{
uint64_t offset_flags, sidedata_offset;
int comp_len, uncomp_len, base_rev, link_rev, parent_1, parent_2,
- sidedata_comp_len;
+ sidedata_comp_len, rank = rank_unknown;
char data_comp_mode, sidedata_comp_mode;
const char *c_node_id;
const char *data;
@@ -317,42 +404,96 @@
if (data == NULL)
return NULL;
- offset_flags = getbe32(data + 4);
- /*
- * The first entry on-disk needs the version number masked out,
- * but this doesn't apply if entries are added to an empty index.
- */
- if (self->length && pos == 0)
- offset_flags &= 0xFFFF;
- else {
- uint32_t offset_high = getbe32(data);
- offset_flags |= ((uint64_t)offset_high) << 32;
- }
-
- comp_len = getbe32(data + 8);
- uncomp_len = getbe32(data + 12);
- base_rev = getbe32(data + 16);
- link_rev = getbe32(data + 20);
- parent_1 = getbe32(data + 24);
- parent_2 = getbe32(data + 28);
- c_node_id = data + 32;
-
if (self->format_version == format_v1) {
+ offset_flags = getbe32(data + entry_v1_offset_offset_flags);
+ /*
+ * The first entry on-disk needs the version number masked out,
+ * but this doesn't apply if entries are added to an empty
+ * index.
+ */
+ if (self->length && pos == 0)
+ offset_flags &= 0xFFFF;
+ else {
+ uint32_t offset_high =
+ getbe32(data + entry_v1_offset_high);
+ offset_flags |= ((uint64_t)offset_high) << 32;
+ }
+
+ comp_len = getbe32(data + entry_v1_offset_comp_len);
+ uncomp_len = getbe32(data + entry_v1_offset_uncomp_len);
+ base_rev = getbe32(data + entry_v1_offset_base_rev);
+ link_rev = getbe32(data + entry_v1_offset_link_rev);
+ parent_1 = getbe32(data + entry_v1_offset_parent_1);
+ parent_2 = getbe32(data + entry_v1_offset_parent_2);
+ c_node_id = data + entry_v1_offset_node_id;
+
sidedata_offset = 0;
sidedata_comp_len = 0;
data_comp_mode = comp_mode_inline;
sidedata_comp_mode = comp_mode_inline;
+ } else if (self->format_version == format_v2) {
+ offset_flags = getbe32(data + entry_v2_offset_offset_flags);
+ /*
+ * The first entry on-disk needs the version number masked out,
+ * but this doesn't apply if entries are added to an empty
+ * index.
+ */
+ if (self->length && pos == 0)
+ offset_flags &= 0xFFFF;
+ else {
+ uint32_t offset_high =
+ getbe32(data + entry_v2_offset_high);
+ offset_flags |= ((uint64_t)offset_high) << 32;
+ }
+
+ comp_len = getbe32(data + entry_v2_offset_comp_len);
+ uncomp_len = getbe32(data + entry_v2_offset_uncomp_len);
+ base_rev = getbe32(data + entry_v2_offset_base_rev);
+ link_rev = getbe32(data + entry_v2_offset_link_rev);
+ parent_1 = getbe32(data + entry_v2_offset_parent_1);
+ parent_2 = getbe32(data + entry_v2_offset_parent_2);
+ c_node_id = data + entry_v2_offset_node_id;
+
+ sidedata_offset =
+ getbe64(data + entry_v2_offset_sidedata_offset);
+ sidedata_comp_len =
+ getbe32(data + entry_v2_offset_sidedata_comp_len);
+ data_comp_mode = data[entry_v2_offset_all_comp_mode] & 3;
+ sidedata_comp_mode =
+ ((data[entry_v2_offset_all_comp_mode] >> 2) & 3);
+ } else if (self->format_version == format_cl2) {
+ uint32_t offset_high = getbe32(data + entry_cl2_offset_high);
+ offset_flags = getbe32(data + entry_cl2_offset_offset_flags);
+ offset_flags |= ((uint64_t)offset_high) << 32;
+ comp_len = getbe32(data + entry_cl2_offset_comp_len);
+ uncomp_len = getbe32(data + entry_cl2_offset_uncomp_len);
+ /* base_rev and link_rev are not stored in changelogv2, but are
+ still used by some functions shared with the other revlogs.
+ They are supposed to contain links to other revisions,
+ but they always point to themselves in the case of a changelog.
+ */
+ base_rev = pos;
+ link_rev = pos;
+ parent_1 = getbe32(data + entry_cl2_offset_parent_1);
+ parent_2 = getbe32(data + entry_cl2_offset_parent_2);
+ c_node_id = data + entry_cl2_offset_node_id;
+ sidedata_offset =
+ getbe64(data + entry_cl2_offset_sidedata_offset);
+ sidedata_comp_len =
+ getbe32(data + entry_cl2_offset_sidedata_comp_len);
+ data_comp_mode = data[entry_cl2_offset_all_comp_mode] & 3;
+ sidedata_comp_mode =
+ ((data[entry_cl2_offset_all_comp_mode] >> 2) & 3);
+ rank = getbe32(data + entry_cl2_offset_rank);
} else {
- sidedata_offset = getbe64(data + 64);
- sidedata_comp_len = getbe32(data + 72);
- data_comp_mode = data[76] & 3;
- sidedata_comp_mode = ((data[76] >> 2) & 3);
+ raise_revlog_error();
+ return NULL;
}
return Py_BuildValue(tuple_format, offset_flags, comp_len, uncomp_len,
base_rev, link_rev, parent_1, parent_2, c_node_id,
self->nodelen, sidedata_offset, sidedata_comp_len,
- data_comp_mode, sidedata_comp_mode);
+ data_comp_mode, sidedata_comp_mode, rank);
}
/*
* Pack header information in binary
@@ -410,6 +551,7 @@
{
Py_ssize_t length = index_length(self);
const char *data;
+ const char *node_id;
if (pos == nullrev)
return nullid;
@@ -418,7 +560,19 @@
return NULL;
data = index_deref(self, pos);
- return data ? data + 32 : NULL;
+
+ if (self->format_version == format_v1) {
+ node_id = data + entry_v1_offset_node_id;
+ } else if (self->format_version == format_v2) {
+ node_id = data + entry_v2_offset_node_id;
+ } else if (self->format_version == format_cl2) {
+ node_id = data + entry_cl2_offset_node_id;
+ } else {
+ raise_revlog_error();
+ return NULL;
+ }
+
+ return data ? node_id : NULL;
}
/*
@@ -453,7 +607,7 @@
{
uint64_t offset_flags, sidedata_offset;
int rev, comp_len, uncomp_len, base_rev, link_rev, parent_1, parent_2,
- sidedata_comp_len;
+ sidedata_comp_len, rank;
char data_comp_mode, sidedata_comp_mode;
Py_ssize_t c_node_id_len;
const char *c_node_id;
@@ -464,8 +618,8 @@
&uncomp_len, &base_rev, &link_rev, &parent_1,
&parent_2, &c_node_id, &c_node_id_len,
&sidedata_offset, &sidedata_comp_len,
- &data_comp_mode, &sidedata_comp_mode)) {
- PyErr_SetString(PyExc_TypeError, "11-tuple required");
+ &data_comp_mode, &sidedata_comp_mode, &rank)) {
+ PyErr_SetString(PyExc_TypeError, "12-tuple required");
return NULL;
}
@@ -501,25 +655,61 @@
}
rev = self->length + self->new_length;
data = self->added + self->entry_size * self->new_length++;
- putbe32(offset_flags >> 32, data);
- putbe32(offset_flags & 0xffffffffU, data + 4);
- putbe32(comp_len, data + 8);
- putbe32(uncomp_len, data + 12);
- putbe32(base_rev, data + 16);
- putbe32(link_rev, data + 20);
- putbe32(parent_1, data + 24);
- putbe32(parent_2, data + 28);
- memcpy(data + 32, c_node_id, c_node_id_len);
- /* Padding since SHA-1 is only 20 bytes for now */
- memset(data + 32 + c_node_id_len, 0, 32 - c_node_id_len);
- if (self->format_version == format_v2) {
- putbe64(sidedata_offset, data + 64);
- putbe32(sidedata_comp_len, data + 72);
+
+ memset(data, 0, self->entry_size);
+
+ if (self->format_version == format_v1) {
+ putbe32(offset_flags >> 32, data + entry_v1_offset_high);
+ putbe32(offset_flags & 0xffffffffU,
+ data + entry_v1_offset_offset_flags);
+ putbe32(comp_len, data + entry_v1_offset_comp_len);
+ putbe32(uncomp_len, data + entry_v1_offset_uncomp_len);
+ putbe32(base_rev, data + entry_v1_offset_base_rev);
+ putbe32(link_rev, data + entry_v1_offset_link_rev);
+ putbe32(parent_1, data + entry_v1_offset_parent_1);
+ putbe32(parent_2, data + entry_v1_offset_parent_2);
+ memcpy(data + entry_v1_offset_node_id, c_node_id,
+ c_node_id_len);
+ } else if (self->format_version == format_v2) {
+ putbe32(offset_flags >> 32, data + entry_v2_offset_high);
+ putbe32(offset_flags & 0xffffffffU,
+ data + entry_v2_offset_offset_flags);
+ putbe32(comp_len, data + entry_v2_offset_comp_len);
+ putbe32(uncomp_len, data + entry_v2_offset_uncomp_len);
+ putbe32(base_rev, data + entry_v2_offset_base_rev);
+ putbe32(link_rev, data + entry_v2_offset_link_rev);
+ putbe32(parent_1, data + entry_v2_offset_parent_1);
+ putbe32(parent_2, data + entry_v2_offset_parent_2);
+ memcpy(data + entry_v2_offset_node_id, c_node_id,
+ c_node_id_len);
+ putbe64(sidedata_offset,
+ data + entry_v2_offset_sidedata_offset);
+ putbe32(sidedata_comp_len,
+ data + entry_v2_offset_sidedata_comp_len);
comp_field = data_comp_mode & 3;
comp_field = comp_field | (sidedata_comp_mode & 3) << 2;
- data[76] = comp_field;
- /* Padding for 96 bytes alignment */
- memset(data + 77, 0, self->entry_size - 77);
+ data[entry_v2_offset_all_comp_mode] = comp_field;
+ } else if (self->format_version == format_cl2) {
+ putbe32(offset_flags >> 32, data + entry_cl2_offset_high);
+ putbe32(offset_flags & 0xffffffffU,
+ data + entry_cl2_offset_offset_flags);
+ putbe32(comp_len, data + entry_cl2_offset_comp_len);
+ putbe32(uncomp_len, data + entry_cl2_offset_uncomp_len);
+ putbe32(parent_1, data + entry_cl2_offset_parent_1);
+ putbe32(parent_2, data + entry_cl2_offset_parent_2);
+ memcpy(data + entry_cl2_offset_node_id, c_node_id,
+ c_node_id_len);
+ putbe64(sidedata_offset,
+ data + entry_cl2_offset_sidedata_offset);
+ putbe32(sidedata_comp_len,
+ data + entry_cl2_offset_sidedata_comp_len);
+ comp_field = data_comp_mode & 3;
+ comp_field = comp_field | (sidedata_comp_mode & 3) << 2;
+ data[entry_cl2_offset_all_comp_mode] = comp_field;
+ putbe32(rank, data + entry_cl2_offset_rank);
+ } else {
+ raise_revlog_error();
+ return NULL;
}
if (self->ntinitialized)
@@ -574,10 +764,28 @@
/* Find the newly added node, offset from the "already on-disk" length
*/
data = self->added + self->entry_size * (rev - self->length);
- putbe64(offset_flags, data);
- putbe64(sidedata_offset, data + 64);
- putbe32(sidedata_comp_len, data + 72);
- data[76] = (data[76] & ~(3 << 2)) | ((comp_mode & 3) << 2);
+ if (self->format_version == format_v2) {
+ putbe64(offset_flags, data + entry_v2_offset_high);
+ putbe64(sidedata_offset,
+ data + entry_v2_offset_sidedata_offset);
+ putbe32(sidedata_comp_len,
+ data + entry_v2_offset_sidedata_comp_len);
+ data[entry_v2_offset_all_comp_mode] =
+ (data[entry_v2_offset_all_comp_mode] & ~(3 << 2)) |
+ ((comp_mode & 3) << 2);
+ } else if (self->format_version == format_cl2) {
+ putbe64(offset_flags, data + entry_cl2_offset_high);
+ putbe64(sidedata_offset,
+ data + entry_cl2_offset_sidedata_offset);
+ putbe32(sidedata_comp_len,
+ data + entry_cl2_offset_sidedata_comp_len);
+ data[entry_cl2_offset_all_comp_mode] =
+ (data[entry_cl2_offset_all_comp_mode] & ~(3 << 2)) |
+ ((comp_mode & 3) << 2);
+ } else {
+ raise_revlog_error();
+ return NULL;
+ }
Py_RETURN_NONE;
}
@@ -1120,7 +1328,17 @@
data = index_deref(self, rev);
if (data == NULL)
return -2;
- result = getbe32(data + 16);
+
+ if (self->format_version == format_v1) {
+ result = getbe32(data + entry_v1_offset_base_rev);
+ } else if (self->format_version == format_v2) {
+ result = getbe32(data + entry_v2_offset_base_rev);
+ } else if (self->format_version == format_cl2) {
+ return rev;
+ } else {
+ raise_revlog_error();
+ return -1;
+ }
if (result > rev) {
PyErr_Format(
@@ -2598,8 +2816,10 @@
if (i < 0)
return;
- for (i = start; i < len; i++)
- nt_delete_node(&self->nt, index_deref(self, i) + 32);
+ for (i = start; i < len; i++) {
+ const char *node = index_node(self, i);
+ nt_delete_node(&self->nt, node);
+ }
self->new_length = start - self->length;
}
@@ -2732,9 +2952,18 @@
while (pos + self->entry_size <= end && pos >= 0) {
uint32_t comp_len, sidedata_comp_len = 0;
/* 3rd element of header is length of compressed inline data */
- comp_len = getbe32(data + pos + 8);
- if (self->entry_size == v2_entry_size) {
- sidedata_comp_len = getbe32(data + pos + 72);
+ if (self->format_version == format_v1) {
+ comp_len =
+ getbe32(data + pos + entry_v1_offset_comp_len);
+ sidedata_comp_len = 0;
+ } else if (self->format_version == format_v2) {
+ comp_len =
+ getbe32(data + pos + entry_v2_offset_comp_len);
+ sidedata_comp_len = getbe32(
+ data + pos + entry_v2_offset_sidedata_comp_len);
+ } else {
+ raise_revlog_error();
+ return -1;
}
incr = self->entry_size + comp_len + sidedata_comp_len;
if (offsets)
@@ -2754,10 +2983,10 @@
static int index_init(indexObject *self, PyObject *args, PyObject *kwargs)
{
- PyObject *data_obj, *inlined_obj, *revlogv2;
+ PyObject *data_obj, *inlined_obj;
Py_ssize_t size;
- static char *kwlist[] = {"data", "inlined", "revlogv2", NULL};
+ static char *kwlist[] = {"data", "inlined", "format", NULL};
/* Initialize before argument-checking to avoid index_dealloc() crash.
*/
@@ -2774,10 +3003,11 @@
self->nodelen = 20;
self->nullentry = NULL;
self->rust_ext_compat = 1;
-
- revlogv2 = NULL;
- if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OO|O", kwlist,
- &data_obj, &inlined_obj, &revlogv2))
+ self->format_version = format_v1;
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OO|l", kwlist,
+ &data_obj, &inlined_obj,
+ &(self->format_version)))
return -1;
if (!PyObject_CheckBuffer(data_obj)) {
PyErr_SetString(PyExc_TypeError,
@@ -2789,17 +3019,18 @@
return -1;
}
- if (revlogv2 && PyObject_IsTrue(revlogv2)) {
- self->format_version = format_v2;
- self->entry_size = v2_entry_size;
- } else {
- self->format_version = format_v1;
+ if (self->format_version == format_v1) {
self->entry_size = v1_entry_size;
+ } else if (self->format_version == format_v2) {
+ self->entry_size = v2_entry_size;
+ } else if (self->format_version == format_cl2) {
+ self->entry_size = cl2_entry_size;
}
- self->nullentry = Py_BuildValue(
- PY23("iiiiiiis#iiBB", "iiiiiiiy#iiBB"), 0, 0, 0, -1, -1, -1, -1,
- nullid, self->nodelen, 0, 0, comp_mode_inline, comp_mode_inline);
+ self->nullentry =
+ Py_BuildValue(PY23("iiiiiiis#iiBBi", "iiiiiiiy#iiBBi"), 0, 0, 0, -1,
+ -1, -1, -1, nullid, self->nodelen, 0, 0,
+ comp_mode_inline, comp_mode_inline, rank_unknown);
if (!self->nullentry)
return -1;
--- a/mercurial/changegroup.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/changegroup.py Fri Feb 18 14:27:43 2022 +0100
@@ -350,10 +350,11 @@
def ondupchangelog(cl, rev):
if rev < clstart:
- duprevs.append(rev)
+ duprevs.append(rev) # pytype: disable=attribute-error
def onchangelog(cl, rev):
ctx = cl.changelogrevision(rev)
+ assert efilesset is not None # help pytype
efilesset.update(ctx.files)
repo.register_changeset(rev, ctx)
--- a/mercurial/chgserver.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/chgserver.py Fri Feb 18 14:27:43 2022 +0100
@@ -643,6 +643,13 @@
def __init__(self, ui):
self.ui = ui
+
+ # TODO: use PEP 526 syntax (`_hashstate: hashstate` at the class level)
+ # when 3.5 support is dropped.
+ self._hashstate = None # type: hashstate
+ self._baseaddress = None # type: bytes
+ self._realaddress = None # type: bytes
+
self._idletimeout = ui.configint(b'chgserver', b'idletimeout')
self._lastactive = time.time()
--- a/mercurial/cmdutil.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/cmdutil.py Fri Feb 18 14:27:43 2022 +0100
@@ -522,8 +522,10 @@
# 1. filter patch, since we are intending to apply subset of it
try:
chunks, newopts = filterfn(ui, original_headers, match)
- except error.PatchError as err:
+ except error.PatchParseError as err:
raise error.InputError(_(b'error parsing patch: %s') % err)
+ except error.PatchApplicationError as err:
+ raise error.StateError(_(b'error applying patch: %s') % err)
opts.update(newopts)
# We need to keep a backup of files that have been newly added and
@@ -608,8 +610,10 @@
ui.debug(b'applying patch\n')
ui.debug(fp.getvalue())
patch.internalpatch(ui, repo, fp, 1, eolmode=None)
- except error.PatchError as err:
+ except error.PatchParseError as err:
raise error.InputError(pycompat.bytestr(err))
+ except error.PatchApplicationError as err:
+ raise error.StateError(pycompat.bytestr(err))
del fp
# 4. We prepared working directory according to filtered
@@ -2020,9 +2024,16 @@
eolmode=None,
similarity=sim / 100.0,
)
- except error.PatchError as e:
+ except error.PatchParseError as e:
+ raise error.InputError(
+ pycompat.bytestr(e),
+ hint=_(
+ b'check that whitespace in the patch has not been mangled'
+ ),
+ )
+ except error.PatchApplicationError as e:
if not partial:
- raise error.Abort(pycompat.bytestr(e))
+ raise error.StateError(pycompat.bytestr(e))
if partial:
rejects = True
@@ -2079,8 +2090,15 @@
files,
eolmode=None,
)
- except error.PatchError as e:
- raise error.Abort(stringutil.forcebytestr(e))
+ except error.PatchParseError as e:
+ raise error.InputError(
+ stringutil.forcebytestr(e),
+ hint=_(
+ b'check that whitespace in the patch has not been mangled'
+ ),
+ )
+ except error.PatchApplicationError as e:
+ raise error.StateError(stringutil.forcebytestr(e))
if opts.get(b'exact'):
editor = None
else:
@@ -3628,15 +3646,14 @@
prntstatusmsg(b'drop', f)
repo.dirstate.set_untracked(f)
- normal = None
- if node == parent:
- # We're reverting to our parent. If possible, we'd like status
- # to report the file as clean. We have to use normallookup for
- # merges to avoid losing information about merged/dirty files.
- if p2 != repo.nullid:
- normal = repo.dirstate.set_tracked
- else:
- normal = repo.dirstate.set_clean
+ # We are reverting to our parent. If possible, we would like `hg status`
+ # to report the file as clean. We have to be less aggressive for
+ # merges to avoid losing information about copies introduced by the merge.
+ # This might come with bugs?
+ reset_copy = p2 == repo.nullid
+
+ def normal(filename):
+ return repo.dirstate.set_tracked(filename, reset_copy=reset_copy)
newlyaddedandmodifiedfiles = set()
if interactive:
@@ -3674,8 +3691,10 @@
if operation == b'discard':
chunks = patch.reversehunks(chunks)
- except error.PatchError as err:
- raise error.Abort(_(b'error parsing patch: %s') % err)
+ except error.PatchParseError as err:
+ raise error.InputError(_(b'error parsing patch: %s') % err)
+ except error.PatchApplicationError as err:
+ raise error.StateError(_(b'error applying patch: %s') % err)
# FIXME: when doing an interactive revert of a copy, there's no way of
# performing a partial revert of the added file, the only option is
@@ -3710,8 +3729,10 @@
if dopatch:
try:
patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
- except error.PatchError as err:
- raise error.Abort(pycompat.bytestr(err))
+ except error.PatchParseError as err:
+ raise error.InputError(pycompat.bytestr(err))
+ except error.PatchApplicationError as err:
+ raise error.StateError(pycompat.bytestr(err))
del fp
else:
for f in actions[b'revert'][0]:
@@ -3727,9 +3748,6 @@
checkout(f)
repo.dirstate.set_tracked(f)
- normal = repo.dirstate.set_tracked
- if node == parent and p2 == repo.nullid:
- normal = repo.dirstate.set_clean
for f in actions[b'undelete'][0]:
if interactive:
choice = repo.ui.promptchoice(
--- a/mercurial/color.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/color.py Fri Feb 18 14:27:43 2022 +0100
@@ -248,28 +248,19 @@
if pycompat.iswindows:
from . import win32
- term = encoding.environ.get(b'TERM')
- # TERM won't be defined in a vanilla cmd.exe environment.
-
- # UNIX-like environments on Windows such as Cygwin and MSYS will
- # set TERM. They appear to make a best effort attempt at setting it
- # to something appropriate. However, not all environments with TERM
- # defined support ANSI.
- ansienviron = term and b'xterm' in term
-
if mode == b'auto':
# Since "ansi" could result in terminal gibberish, we error on the
# side of selecting "win32". However, if w32effects is not defined,
# we almost certainly don't support "win32", so don't even try.
# w32effects is not populated when stdout is redirected, so checking
# it first avoids win32 calls in a state known to error out.
- if ansienviron or not w32effects or win32.enablevtmode():
+ if not w32effects or win32.enablevtmode():
realmode = b'ansi'
else:
realmode = b'win32'
# An empty w32effects is a clue that stdout is redirected, and thus
# cannot enable VT mode.
- elif mode == b'ansi' and w32effects and not ansienviron:
+ elif mode == b'ansi' and w32effects:
win32.enablevtmode()
elif mode == b'auto':
realmode = b'ansi'
--- a/mercurial/commands.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/commands.py Fri Feb 18 14:27:43 2022 +0100
@@ -3309,7 +3309,9 @@
overrides = {(b'ui', b'forcemerge'): opts.get('tool', b'')}
base = ctx.p1() if basectx is None else basectx
with ui.configoverride(overrides, b'graft'):
- stats = mergemod.graft(repo, ctx, base, [b'local', b'graft'])
+ stats = mergemod.graft(
+ repo, ctx, base, [b'local', b'graft', b'parent of graft']
+ )
# report any conflicts
if stats.unresolvedcount > 0:
# write out state for --continue
@@ -4914,7 +4916,7 @@
overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')}
with ui.configoverride(overrides, b'merge'):
force = opts.get(b'force')
- labels = [b'working copy', b'merge rev']
+ labels = [b'working copy', b'merge rev', b'common ancestor']
return hg.merge(ctx, force=force, labels=labels)
@@ -6130,7 +6132,6 @@
ret = 0
didwork = False
- tocomplete = []
hasconflictmarkers = []
if mark:
markcheck = ui.config(b'commands', b'resolve.mark-check')
@@ -6183,24 +6184,20 @@
# preresolve file
overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')}
with ui.configoverride(overrides, b'resolve'):
- complete, r = ms.preresolve(f, wctx)
- if not complete:
- tocomplete.append(f)
- elif r:
+ r = ms.resolve(f, wctx)
+ if r:
ret = 1
finally:
ms.commit()
- # replace filemerge's .orig file with our resolve file, but only
- # for merges that are complete
- if complete:
- try:
- util.rename(
- a + b".resolve", scmutil.backuppath(ui, repo, f)
- )
- except OSError as inst:
- if inst.errno != errno.ENOENT:
- raise
+ # replace filemerge's .orig file with our resolve file
+ try:
+ util.rename(
+ a + b".resolve", scmutil.backuppath(ui, repo, f)
+ )
+ except OSError as inst:
+ if inst.errno != errno.ENOENT:
+ raise
if hasconflictmarkers:
ui.warn(
@@ -6218,25 +6215,6 @@
hint=_(b'use --all to mark anyway'),
)
- for f in tocomplete:
- try:
- # resolve file
- overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')}
- with ui.configoverride(overrides, b'resolve'):
- r = ms.resolve(f, wctx)
- if r:
- ret = 1
- finally:
- ms.commit()
-
- # replace filemerge's .orig file with our resolve file
- a = repo.wjoin(f)
- try:
- util.rename(a + b".resolve", scmutil.backuppath(ui, repo, f))
- except OSError as inst:
- if inst.errno != errno.ENOENT:
- raise
-
ms.commit()
branchmerge = repo.dirstate.p2() != repo.nullid
# resolve is not doing a parent change here, however, `record updates`
@@ -6897,9 +6875,9 @@
cmdutil.check_at_most_one_arg(opts, 'rev', 'change')
opts = pycompat.byteskwargs(opts)
- revs = opts.get(b'rev')
- change = opts.get(b'change')
- terse = opts.get(b'terse')
+ revs = opts.get(b'rev', [])
+ change = opts.get(b'change', b'')
+ terse = opts.get(b'terse', _NOTTERSE)
if terse is _NOTTERSE:
if revs:
terse = b''
@@ -7832,9 +7810,9 @@
raise error.InputError(_(b"you can't specify a revision and a date"))
updatecheck = None
- if check:
+ if check or merge is not None and not merge:
updatecheck = b'abort'
- elif merge:
+ elif merge or check is not None and not check:
updatecheck = b'none'
with repo.wlock():
--- a/mercurial/commit.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/commit.py Fri Feb 18 14:27:43 2022 +0100
@@ -134,7 +134,13 @@
for s in salvaged:
files.mark_salvaged(s)
- if ctx.manifestnode():
+ narrow_files = {}
+ if not ctx.repo().narrowmatch().always():
+ for f, e in ms.allextras().items():
+ action = e.get(b'outside-narrow-merge-action')
+ if action is not None:
+ narrow_files[f] = action
+ if ctx.manifestnode() and not narrow_files:
# reuse an existing manifest revision
repo.ui.debug(b'reusing known manifest\n')
mn = ctx.manifestnode()
@@ -142,11 +148,11 @@
if writechangesetcopy:
files.update_added(ctx.filesadded())
files.update_removed(ctx.filesremoved())
- elif not ctx.files():
+ elif not ctx.files() and not narrow_files:
repo.ui.debug(b'reusing manifest from p1 (no file change)\n')
mn = p1.manifestnode()
else:
- mn = _process_files(tr, ctx, ms, files, error=error)
+ mn = _process_files(tr, ctx, ms, files, narrow_files, error=error)
if origctx and origctx.manifestnode() == mn:
origfiles = origctx.files()
@@ -177,7 +183,7 @@
return salvaged
-def _process_files(tr, ctx, ms, files, error=False):
+def _process_files(tr, ctx, ms, files, narrow_files=None, error=False):
repo = ctx.repo()
p1 = ctx.p1()
p2 = ctx.p2()
@@ -198,8 +204,33 @@
linkrev = len(repo)
repo.ui.note(_(b"committing files:\n"))
uipathfn = scmutil.getuipathfn(repo)
- for f in sorted(ctx.modified() + ctx.added()):
+ all_files = ctx.modified() + ctx.added()
+ all_files.extend(narrow_files.keys())
+ all_files.sort()
+ for f in all_files:
repo.ui.note(uipathfn(f) + b"\n")
+ if f in narrow_files:
+ narrow_action = narrow_files.get(f)
+ if narrow_action == mergestate.CHANGE_REMOVED:
+ files.mark_removed(f)
+ removed.append(f)
+ elif narrow_action == mergestate.CHANGE_ADDED:
+ files.mark_added(f)
+ added.append(f)
+ m[f] = m2[f]
+ flags = m2ctx.find(f)[1] or b''
+ m.setflag(f, flags)
+ elif narrow_action == mergestate.CHANGE_MODIFIED:
+ files.mark_touched(f)
+ added.append(f)
+ m[f] = m2[f]
+ flags = m2ctx.find(f)[1] or b''
+ m.setflag(f, flags)
+ else:
+ msg = _(b"corrupted mergestate, unknown narrow action: %b")
+ hint = _(b"restart the merge")
+ raise error.Abort(msg, hint=hint)
+ continue
try:
fctx = ctx[f]
if fctx is None:
@@ -239,7 +270,17 @@
if not rf(f):
files.mark_removed(f)
- mn = _commit_manifest(tr, linkrev, ctx, mctx, m, files.touched, added, drop)
+ mn = _commit_manifest(
+ tr,
+ linkrev,
+ ctx,
+ mctx,
+ m,
+ files.touched,
+ added,
+ drop,
+ bool(narrow_files),
+ )
return mn
@@ -409,7 +450,17 @@
return fnode, touched
-def _commit_manifest(tr, linkrev, ctx, mctx, manifest, files, added, drop):
+def _commit_manifest(
+ tr,
+ linkrev,
+ ctx,
+ mctx,
+ manifest,
+ files,
+ added,
+ drop,
+ has_some_narrow_action=False,
+):
"""make a new manifest entry (or reuse a new one)
given an initialised manifest context and precomputed list of
@@ -451,6 +502,10 @@
# at this point is merges, and we already error out in the
# case where the merge has files outside of the narrowspec,
# so this is safe.
+ if has_some_narrow_action:
+ match = None
+ else:
+ match = repo.narrowmatch()
mn = mctx.write(
tr,
linkrev,
@@ -458,7 +513,7 @@
p2.manifestnode(),
added,
drop,
- match=repo.narrowmatch(),
+ match=match,
)
else:
repo.ui.debug(
--- a/mercurial/configitems.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/configitems.py Fri Feb 18 14:27:43 2022 +0100
@@ -1042,11 +1042,6 @@
)
coreconfigitem(
b'experimental',
- b'mergetempdirprefix',
- default=None,
-)
-coreconfigitem(
- b'experimental',
b'mmapindexthreshold',
default=None,
)
@@ -1102,16 +1097,6 @@
)
coreconfigitem(
b'experimental',
- b'httppeer.advertise-v2',
- default=False,
-)
-coreconfigitem(
- b'experimental',
- b'httppeer.v2-encoder-order',
- default=None,
-)
-coreconfigitem(
- b'experimental',
b'httppostargs',
default=False,
)
@@ -1211,11 +1196,6 @@
)
coreconfigitem(
b'experimental',
- b'sshserver.support-v2',
- default=False,
-)
-coreconfigitem(
- b'experimental',
b'sparse-read',
default=False,
)
@@ -1241,26 +1221,6 @@
)
coreconfigitem(
b'experimental',
- b'sshpeer.advertise-v2',
- default=False,
-)
-coreconfigitem(
- b'experimental',
- b'web.apiserver',
- default=False,
-)
-coreconfigitem(
- b'experimental',
- b'web.api.http-v2',
- default=False,
-)
-coreconfigitem(
- b'experimental',
- b'web.api.debugreflect',
- default=False,
-)
-coreconfigitem(
- b'experimental',
b'web.full-garbage-collection-rate',
default=1, # still forcing a full collection on each request
)
@@ -1281,11 +1241,17 @@
)
coreconfigitem(
b'extensions',
- b'.*',
+ b'[^:]*',
default=None,
generic=True,
)
coreconfigitem(
+ b'extensions',
+ b'[^:]*:required',
+ default=False,
+ generic=True,
+)
+coreconfigitem(
b'extdata',
b'.*',
default=None,
@@ -1313,6 +1279,18 @@
)
coreconfigitem(
b'format',
+ b'use-dirstate-tracked-hint',
+ default=False,
+ experimental=True,
+)
+coreconfigitem(
+ b'format',
+ b'use-dirstate-tracked-hint.version',
+ default=1,
+ experimental=True,
+)
+coreconfigitem(
+ b'format',
b'dotencode',
default=True,
)
@@ -1352,10 +1330,10 @@
)
# Experimental TODOs:
#
-# * Same as for evlogv2 (but for the reduction of the number of files)
+# * Same as for revlogv2 (but for the reduction of the number of files)
+# * Actually computing the rank of changesets
# * Improvement to investigate
# - storing .hgtags fnode
-# - storing `rank` of changesets
# - storing branch related identifier
coreconfigitem(
@@ -1405,7 +1383,7 @@
coreconfigitem(
b'format',
b'use-share-safe',
- default=False,
+ default=True,
)
coreconfigitem(
b'format',
--- a/mercurial/context.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/context.py Fri Feb 18 14:27:43 2022 +0100
@@ -20,7 +20,6 @@
)
from .pycompat import (
getattr,
- open,
)
from . import (
dagop,
@@ -46,6 +45,9 @@
dateutil,
stringutil,
)
+from .dirstateutils import (
+ timestamp,
+)
propertycache = util.propertycache
@@ -682,6 +684,14 @@
"""Return a list of byte bookmark names."""
return self._repo.nodebookmarks(self._node)
+ def fast_rank(self):
+ repo = self._repo
+ if self._maybe_filtered:
+ cl = repo.changelog
+ else:
+ cl = repo.unfiltered().changelog
+ return cl.fast_rank(self._rev)
+
def phase(self):
return self._repo._phasecache.phase(self._repo, self._rev)
@@ -1793,13 +1803,14 @@
sane.append(f)
return sane
- def _checklookup(self, files):
+ def _checklookup(self, files, mtime_boundary):
# check for any possibly clean files
if not files:
- return [], [], []
+ return [], [], [], []
modified = []
deleted = []
+ clean = []
fixup = []
pctx = self._parents[0]
# do a full compare of any files that might have changed
@@ -1813,8 +1824,18 @@
or pctx[f].cmp(self[f])
):
modified.append(f)
+ elif mtime_boundary is None:
+ clean.append(f)
else:
- fixup.append(f)
+ s = self[f].lstat()
+ mode = s.st_mode
+ size = s.st_size
+ file_mtime = timestamp.reliable_mtime_of(s, mtime_boundary)
+ if file_mtime is not None:
+ cache_info = (mode, size, file_mtime)
+ fixup.append((f, cache_info))
+ else:
+ clean.append(f)
except (IOError, OSError):
# A file become inaccessible in between? Mark it as deleted,
# matching dirstate behavior (issue5584).
@@ -1824,7 +1845,7 @@
# it's in the dirstate.
deleted.append(f)
- return modified, deleted, fixup
+ return modified, deleted, clean, fixup
def _poststatusfixup(self, status, fixup):
"""update dirstate for files that are actually clean"""
@@ -1842,13 +1863,13 @@
if dirstate.identity() == oldid:
if fixup:
if dirstate.pendingparentchange():
- normal = lambda f: dirstate.update_file(
+ normal = lambda f, pfd: dirstate.update_file(
f, p1_tracked=True, wc_tracked=True
)
else:
normal = dirstate.set_clean
- for f in fixup:
- normal(f)
+ for f, pdf in fixup:
+ normal(f, pdf)
# write changes out explicitly, because nesting
# wlock at runtime may prevent 'wlock.release()'
# after this block from doing so for subsequent
@@ -1878,19 +1899,23 @@
subrepos = []
if b'.hgsub' in self:
subrepos = sorted(self.substate)
- cmp, s = self._repo.dirstate.status(
+ cmp, s, mtime_boundary = self._repo.dirstate.status(
match, subrepos, ignored=ignored, clean=clean, unknown=unknown
)
# check for any possibly clean files
fixup = []
if cmp:
- modified2, deleted2, fixup = self._checklookup(cmp)
+ modified2, deleted2, clean_set, fixup = self._checklookup(
+ cmp, mtime_boundary
+ )
s.modified.extend(modified2)
s.deleted.extend(deleted2)
+ if clean_set and clean:
+ s.clean.extend(clean_set)
if fixup and clean:
- s.clean.extend(fixup)
+ s.clean.extend((f for f, _ in fixup))
self._poststatusfixup(s, fixup)
@@ -3111,13 +3136,11 @@
return util.readfile(self._path)
def decodeddata(self):
- with open(self._path, b"rb") as f:
- return f.read()
+ return util.readfile(self._path)
def remove(self):
util.unlink(self._path)
def write(self, data, flags, **kwargs):
assert not flags
- with open(self._path, b"wb") as f:
- f.write(data)
+ util.writefile(self._path, data)
--- a/mercurial/copies.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/copies.py Fri Feb 18 14:27:43 2022 +0100
@@ -246,7 +246,6 @@
return {}
repo = a.repo().unfiltered()
- children = {}
cl = repo.changelog
isancestor = cl.isancestorrev
@@ -290,7 +289,7 @@
# no common revision to track copies from
return {}
if has_graph_roots:
+ # this deals with the special case mentioned in the [1] footnotes. We
+ # this deal with the special case mentioned in the [1] footnotes. We
# must filter out revisions that leads to non-common graphroots.
roots = list(roots)
m = min(roots)
@@ -301,11 +300,11 @@
if repo.filecopiesmode == b'changeset-sidedata':
# When using side-data, we will process the edges "from" the children.
- # We iterate over the childre, gathering previous collected data for
+ # We iterate over the children, gathering previously collected data for
# the parents. Do know when the parents data is no longer necessary, we
# keep a counter of how many children each revision has.
#
- # An interresting property of `children_count` is that it only contains
+ # An interesting property of `children_count` is that it only contains
# revision that will be relevant for a edge of the graph. So if a
# children has parent not in `children_count`, that edges should not be
# processed.
@@ -449,7 +448,11 @@
# filter out internal details and return a {dest: source mapping}
final_copies = {}
- for dest, (tt, source) in all_copies[targetrev].items():
+
+ targetrev_items = all_copies[targetrev]
+ assert targetrev_items is not None # help pytype
+
+ for dest, (tt, source) in targetrev_items.items():
if source is not None:
final_copies[dest] = source
if not alwaysmatch:
--- a/mercurial/dagop.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/dagop.py Fri Feb 18 14:27:43 2022 +0100
@@ -9,7 +9,6 @@
import heapq
-from .node import nullrev
from .thirdparty import attr
from .node import nullrev
from . import (
--- a/mercurial/debugcommands.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/debugcommands.py Fri Feb 18 14:27:43 2022 +0100
@@ -91,7 +91,6 @@
vfs as vfsmod,
wireprotoframing,
wireprotoserver,
- wireprotov2peer,
)
from .interfaces import repository
from .utils import (
@@ -179,6 +178,12 @@
_(b'add single file all revs overwrite'),
),
(b'n', b'new-file', None, _(b'add new file at each rev')),
+ (
+ b'',
+ b'from-existing',
+ None,
+ _(b'continue from a non-empty repository'),
+ ),
],
_(b'[OPTION]... [TEXT]'),
)
@@ -189,6 +194,7 @@
mergeable_file=False,
overwritten_file=False,
new_file=False,
+ from_existing=False,
):
"""builds a repo with a given DAG from scratch in the current empty repo
@@ -227,7 +233,7 @@
text = ui.fin.read()
cl = repo.changelog
- if len(cl) > 0:
+ if len(cl) > 0 and not from_existing:
raise error.Abort(_(b'repository is not empty'))
# determine number of revs in DAG
@@ -273,7 +279,10 @@
x[fn].data() for x in (pa, p1, p2)
]
m3 = simplemerge.Merge3Text(base, local, other)
- ml = [l.strip() for l in m3.merge_lines()]
+ ml = [
+ l.strip()
+ for l in simplemerge.render_minimized(m3)[0]
+ ]
ml.append(b"")
elif at > 0:
ml = p1[fn].data().split(b"\n")
@@ -4352,8 +4361,8 @@
``--peer`` can be used to bypass the handshake protocol and construct a
peer instance using the specified class type. Valid values are ``raw``,
- ``http2``, ``ssh1``, and ``ssh2``. ``raw`` instances only allow sending
- raw data payloads and don't support higher-level command actions.
+ ``ssh1``. ``raw`` instances only allow sending raw data payloads and
+ don't support higher-level command actions.
``--noreadstderr`` can be used to disable automatic reading from stderr
of the peer (for SSH connections only). Disabling automatic reading of
@@ -4528,13 +4537,11 @@
if opts[b'peer'] and opts[b'peer'] not in (
b'raw',
- b'http2',
b'ssh1',
- b'ssh2',
):
raise error.Abort(
_(b'invalid value for --peer'),
- hint=_(b'valid values are "raw", "ssh1", and "ssh2"'),
+ hint=_(b'valid values are "raw" and "ssh1"'),
)
if path and opts[b'localssh']:
@@ -4602,18 +4609,6 @@
None,
autoreadstderr=autoreadstderr,
)
- elif opts[b'peer'] == b'ssh2':
- ui.write(_(b'creating ssh peer for wire protocol version 2\n'))
- peer = sshpeer.sshv2peer(
- ui,
- url,
- proc,
- stdin,
- stdout,
- stderr,
- None,
- autoreadstderr=autoreadstderr,
- )
elif opts[b'peer'] == b'raw':
ui.write(_(b'using raw connection to peer\n'))
peer = None
@@ -4666,34 +4661,7 @@
opener = urlmod.opener(ui, authinfo, **openerargs)
- if opts[b'peer'] == b'http2':
- ui.write(_(b'creating http peer for wire protocol version 2\n'))
- # We go through makepeer() because we need an API descriptor for
- # the peer instance to be useful.
- maybe_silent = (
- ui.silent()
- if opts[b'nologhandshake']
- else util.nullcontextmanager()
- )
- with maybe_silent, ui.configoverride(
- {(b'experimental', b'httppeer.advertise-v2'): True}
- ):
- peer = httppeer.makepeer(ui, path, opener=opener)
-
- if not isinstance(peer, httppeer.httpv2peer):
- raise error.Abort(
- _(
- b'could not instantiate HTTP peer for '
- b'wire protocol version 2'
- ),
- hint=_(
- b'the server may not have the feature '
- b'enabled or is not allowing this '
- b'client version'
- ),
- )
-
- elif opts[b'peer'] == b'raw':
+ if opts[b'peer'] == b'raw':
ui.write(_(b'using raw connection to peer\n'))
peer = None
elif opts[b'peer']:
@@ -4774,17 +4742,10 @@
with peer.commandexecutor() as e:
res = e.callcommand(command, args).result()
- if isinstance(res, wireprotov2peer.commandresponse):
- val = res.objects()
- ui.status(
- _(b'response: %s\n')
- % stringutil.pprint(val, bprefix=True, indent=2)
- )
- else:
- ui.status(
- _(b'response: %s\n')
- % stringutil.pprint(res, bprefix=True, indent=2)
- )
+ ui.status(
+ _(b'response: %s\n')
+ % stringutil.pprint(res, bprefix=True, indent=2)
+ )
elif action == b'batchbegin':
if batchedcommands is not None:
--- a/mercurial/destutil.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/destutil.py Fri Feb 18 14:27:43 2022 +0100
@@ -65,9 +65,8 @@
# replaced changesets: same as divergent except we know there
# is no conflict
#
- # pruned changeset: no update is done; though, we could
- # consider updating to the first non-obsolete parent,
- # similar to what is current done for 'hg prune'
+ # pruned changeset: update to the closest non-obsolete ancestor,
+ # similar to what 'hg prune' currently does
if successors:
# flatten the list here handles both divergent (len > 1)
@@ -77,8 +76,15 @@
# get the max revision for the given successors set,
# i.e. the 'tip' of a set
node = repo.revs(b'max(%ln)', successors).first()
- if bookmarks.isactivewdirparent(repo):
- movemark = repo[b'.'].node()
+ else:
+ p1 = p1.p1()
+ while p1.obsolete():
+ p1 = p1.p1()
+ node = p1.node()
+
+ if node is not None and bookmarks.isactivewdirparent(repo):
+ movemark = repo[b'.'].node()
+
return node, movemark, None
--- a/mercurial/dirstate.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/dirstate.py Fri Feb 18 14:27:43 2022 +0100
@@ -12,6 +12,7 @@
import errno
import os
import stat
+import uuid
from .i18n import _
from .pycompat import delattr
@@ -23,6 +24,7 @@
encoding,
error,
match as matchmod,
+ node,
pathutil,
policy,
pycompat,
@@ -66,16 +68,6 @@
return obj._join(fname)
-def _getfsnow(vfs):
- '''Get "now" timestamp on filesystem'''
- tmpfd, tmpname = vfs.mkstemp()
- try:
- return timestamp.mtime_of(os.fstat(tmpfd))
- finally:
- os.close(tmpfd)
- vfs.unlink(tmpname)
-
-
def requires_parents_change(func):
def wrap(self, *args, **kwargs):
if not self.pendingparentchange():
@@ -109,6 +101,7 @@
sparsematchfn,
nodeconstants,
use_dirstate_v2,
+ use_tracked_hint=False,
):
"""Create a new dirstate object.
@@ -117,6 +110,7 @@
the dirstate.
"""
self._use_dirstate_v2 = use_dirstate_v2
+ self._use_tracked_hint = use_tracked_hint
self._nodeconstants = nodeconstants
self._opener = opener
self._validate = validate
@@ -125,12 +119,15 @@
# ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
# UNC path pointing to root share (issue4557)
self._rootdir = pathutil.normasprefix(root)
+ # True if any internal state may be different
self._dirty = False
- self._lastnormaltime = timestamp.zero()
+ # True if the set of tracked file may be different
+ self._dirty_tracked_set = False
self._ui = ui
self._filecache = {}
self._parentwriters = 0
self._filename = b'dirstate'
+ self._filename_th = b'dirstate-tracked-hint'
self._pendingfilename = b'%s.pending' % self._filename
self._plchangecallbacks = {}
self._origpl = None
@@ -332,27 +329,6 @@
return util.pconvert(path)
return path
- def __getitem__(self, key):
- """Return the current state of key (a filename) in the dirstate.
-
- States are:
- n normal
- m needs merging
- r marked for removal
- a marked for addition
- ? not tracked
-
- XXX The "state" is a bit obscure to be in the "public" API. we should
- consider migrating all user of this to going through the dirstate entry
- instead.
- """
- msg = b"don't use dirstate[file], use dirstate.get_entry(file)"
- util.nouideprecwarn(msg, b'6.1', stacklevel=2)
- entry = self._map.get(key)
- if entry is not None:
- return entry.state
- return b'?'
-
def get_entry(self, path):
"""return a DirstateItem for the associated path"""
entry = self._map.get(path)
@@ -440,8 +416,8 @@
for a in ("_map", "_branch", "_ignore"):
if a in self.__dict__:
delattr(self, a)
- self._lastnormaltime = timestamp.zero()
self._dirty = False
+ self._dirty_tracked_set = False
self._parentwriters = 0
self._origpl = None
@@ -462,19 +438,26 @@
return self._map.copymap
@requires_no_parents_change
- def set_tracked(self, filename):
+ def set_tracked(self, filename, reset_copy=False):
"""a "public" method for generic code to mark a file as tracked
This function is to be called outside of "update/merge" case. For
example by a command like `hg add X`.
+ if reset_copy is set, any existing copy information will be dropped.
+
return True the file was previously untracked, False otherwise.
"""
self._dirty = True
entry = self._map.get(filename)
if entry is None or not entry.tracked:
self._check_new_tracked_filename(filename)
- return self._map.set_tracked(filename)
+ pre_tracked = self._map.set_tracked(filename)
+ if reset_copy:
+ self._map.copymap.pop(filename, None)
+ if pre_tracked:
+ self._dirty_tracked_set = True
+ return pre_tracked
@requires_no_parents_change
def set_untracked(self, filename):
@@ -488,24 +471,17 @@
ret = self._map.set_untracked(filename)
if ret:
self._dirty = True
+ self._dirty_tracked_set = True
return ret
@requires_no_parents_change
- def set_clean(self, filename, parentfiledata=None):
+ def set_clean(self, filename, parentfiledata):
"""record that the current state of the file on disk is known to be clean"""
self._dirty = True
- if parentfiledata:
- (mode, size, mtime) = parentfiledata
- else:
- (mode, size, mtime) = self._get_filedata(filename)
if not self._map[filename].tracked:
self._check_new_tracked_filename(filename)
+ (mode, size, mtime) = parentfiledata
self._map.set_clean(filename, mode, size, mtime)
- if mtime > self._lastnormaltime:
- # Remember the most recent modification timeslot for status(),
- # to make sure we won't miss future size-preserving file content
- # modifications that happen within the same timeslot.
- self._lastnormaltime = mtime
@requires_no_parents_change
def set_possibly_dirty(self, filename):
@@ -544,10 +520,6 @@
if entry is not None and entry.added:
return # avoid dropping copy information (maybe?)
- parentfiledata = None
- if wc_tracked and p1_tracked:
- parentfiledata = self._get_filedata(filename)
-
self._map.reset_state(
filename,
wc_tracked,
@@ -555,16 +527,7 @@
# the underlying reference might have changed, we will have to
# check it.
has_meaningful_mtime=False,
- parentfiledata=parentfiledata,
)
- if (
- parentfiledata is not None
- and parentfiledata[2] > self._lastnormaltime
- ):
- # Remember the most recent modification timeslot for status(),
- # to make sure we won't miss future size-preserving file content
- # modifications that happen within the same timeslot.
- self._lastnormaltime = parentfiledata[2]
@requires_parents_change
def update_file(
@@ -593,13 +556,13 @@
# this. The test agrees
self._dirty = True
-
- need_parent_file_data = (
- not possibly_dirty and not p2_info and wc_tracked and p1_tracked
- )
-
- if need_parent_file_data and parentfiledata is None:
- parentfiledata = self._get_filedata(filename)
+ old_entry = self._map.get(filename)
+ if old_entry is None:
+ prev_tracked = False
+ else:
+ prev_tracked = old_entry.tracked
+ if prev_tracked != wc_tracked:
+ self._dirty_tracked_set = True
self._map.reset_state(
filename,
@@ -609,14 +572,6 @@
has_meaningful_mtime=not possibly_dirty,
parentfiledata=parentfiledata,
)
- if (
- parentfiledata is not None
- and parentfiledata[2] > self._lastnormaltime
- ):
- # Remember the most recent modification timeslot for status(),
- # to make sure we won't miss future size-preserving file content
- # modifications that happen within the same timeslot.
- self._lastnormaltime = parentfiledata[2]
def _check_new_tracked_filename(self, filename):
scmutil.checkfilename(filename)
@@ -634,14 +589,6 @@
msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
raise error.Abort(msg)
- def _get_filedata(self, filename):
- """returns"""
- s = os.lstat(self._join(filename))
- mode = s.st_mode
- size = s.st_size
- mtime = timestamp.mtime_of(s)
- return (mode, size, mtime)
-
def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
if exists is None:
exists = os.path.lexists(os.path.join(self._root, path))
@@ -720,7 +667,6 @@
def clear(self):
self._map.clear()
- self._lastnormaltime = timestamp.zero()
self._dirty = True
def rebuild(self, parent, allfiles, changedfiles=None):
@@ -728,9 +674,7 @@
# Rebuild entire dirstate
to_lookup = allfiles
to_drop = []
- lastnormaltime = self._lastnormaltime
self.clear()
- self._lastnormaltime = lastnormaltime
elif len(changedfiles) < 10:
# Avoid turning allfiles into a set, which can be expensive if it's
# large.
@@ -777,28 +721,41 @@
if not self._dirty:
return
- filename = self._filename
+ write_key = self._use_tracked_hint and self._dirty_tracked_set
if tr:
- # 'dirstate.write()' is not only for writing in-memory
- # changes out, but also for dropping ambiguous timestamp.
- # delayed writing re-raise "ambiguous timestamp issue".
- # See also the wiki page below for detail:
- # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
-
- # record when mtime start to be ambiguous
- now = _getfsnow(self._opener)
-
# delay writing in-memory changes out
tr.addfilegenerator(
- b'dirstate',
+ b'dirstate-1-main',
(self._filename,),
- lambda f: self._writedirstate(tr, f, now=now),
+ lambda f: self._writedirstate(tr, f),
location=b'plain',
+ post_finalize=True,
)
+ if write_key:
+ tr.addfilegenerator(
+ b'dirstate-2-key-post',
+ (self._filename_th,),
+ lambda f: self._write_tracked_hint(tr, f),
+ location=b'plain',
+ post_finalize=True,
+ )
return
- st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
- self._writedirstate(tr, st)
+ file = lambda f: self._opener(f, b"w", atomictemp=True, checkambig=True)
+ with file(self._filename) as f:
+ self._writedirstate(tr, f)
+ if write_key:
+ # we update the key-file after writing to make sure readers have a
+ # key that matches the newly written content
+ with file(self._filename_th) as f:
+ self._write_tracked_hint(tr, f)
+
+ def delete_tracked_hint(self):
+ """remove the tracked_hint file
+
+ To be used by format downgrades operation"""
+ self._opener.unlink(self._filename_th)
+ self._use_tracked_hint = False
def addparentchangecallback(self, category, callback):
"""add a callback to be called when the wd parents are changed
@@ -811,7 +768,7 @@
"""
self._plchangecallbacks[category] = callback
- def _writedirstate(self, tr, st, now=None):
+ def _writedirstate(self, tr, st):
# notify callbacks about parents change
if self._origpl is not None and self._origpl != self._pl:
for c, callback in sorted(
@@ -819,34 +776,13 @@
):
callback(self, self._origpl, self._pl)
self._origpl = None
-
- if now is None:
- # use the modification time of the newly created temporary file as the
- # filesystem's notion of 'now'
- now = timestamp.mtime_of(util.fstat(st))
+ self._map.write(tr, st)
+ self._dirty = False
+ self._dirty_tracked_set = False
- # enough 'delaywrite' prevents 'pack_dirstate' from dropping
- # timestamp of each entries in dirstate, because of 'now > mtime'
- delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
- if delaywrite > 0:
- # do we have any files to delay for?
- for f, e in pycompat.iteritems(self._map):
- if e.need_delay(now):
- import time # to avoid useless import
-
- # rather than sleep n seconds, sleep until the next
- # multiple of n seconds
- clock = time.time()
- start = int(clock) - (int(clock) % delaywrite)
- end = start + delaywrite
- time.sleep(end - clock)
- # trust our estimate that the end is near now
- now = timestamp.timestamp((end, 0))
- break
-
- self._map.write(tr, st, now)
- self._lastnormaltime = timestamp.zero()
- self._dirty = False
+ def _write_tracked_hint(self, tr, f):
+ key = node.hex(uuid.uuid4().bytes)
+ f.write(b"1\n%s\n" % key) # 1 is the format version
def _dirignore(self, f):
if self._ignore(f):
@@ -1243,7 +1179,6 @@
self._rootdir,
self._ignorefiles(),
self._checkexec,
- self._lastnormaltime,
bool(list_clean),
bool(list_ignored),
bool(list_unknown),
@@ -1335,11 +1270,20 @@
# Some matchers have yet to be implemented
use_rust = False
+ # Get the time from the filesystem so we can disambiguate files that
+ # appear modified in the present or future.
+ try:
+ mtime_boundary = timestamp.get_fs_now(self._opener)
+ except OSError:
+ # In largefiles or readonly context
+ mtime_boundary = None
+
if use_rust:
try:
- return self._rust_status(
+ res = self._rust_status(
match, listclean, listignored, listunknown
)
+ return res + (mtime_boundary,)
except rustmod.FallbackError:
pass
@@ -1361,7 +1305,6 @@
checkexec = self._checkexec
checklink = self._checklink
copymap = self._map.copymap
- lastnormaltime = self._lastnormaltime
# We need to do full walks when either
# - we're listing all clean files, or
@@ -1417,19 +1360,17 @@
else:
madd(fn)
elif not t.mtime_likely_equal_to(timestamp.mtime_of(st)):
- ladd(fn)
- elif timestamp.mtime_of(st) == lastnormaltime:
- # fn may have just been marked as normal and it may have
- # changed in the same second without changing its size.
- # This can happen if we quickly do multiple commits.
- # Force lookup, so we don't miss such a racy file change.
+ # There might be a change in the future if for example the
+ # internal clock is off, but this is a case where the issues
+ # the user would face would be a lot worse and there is
+ # nothing we can really do.
ladd(fn)
elif listclean:
cadd(fn)
status = scmutil.status(
modified, added, removed, deleted, unknown, ignored, clean
)
- return (lookup, status)
+ return (lookup, status, mtime_boundary)
def matches(self, match):
"""
@@ -1477,10 +1418,11 @@
# changes written out above, even if dirstate is never
# changed after this
tr.addfilegenerator(
- b'dirstate',
+ b'dirstate-1-main',
(self._filename,),
lambda f: self._writedirstate(tr, f),
location=b'plain',
+ post_finalize=True,
)
# ensure that pending file written above is unlinked at
--- a/mercurial/dirstatemap.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/dirstatemap.py Fri Feb 18 14:27:43 2022 +0100
@@ -444,13 +444,13 @@
self.__getitem__ = self._map.__getitem__
self.get = self._map.get
- def write(self, tr, st, now):
+ def write(self, tr, st):
if self._use_dirstate_v2:
- packed, meta = v2.pack_dirstate(self._map, self.copymap, now)
+ packed, meta = v2.pack_dirstate(self._map, self.copymap)
self.write_v2_no_append(tr, st, meta, packed)
else:
packed = parsers.pack_dirstate(
- self._map, self.copymap, self.parents(), now
+ self._map, self.copymap, self.parents()
)
st.write(packed)
st.close()
@@ -655,10 +655,10 @@
self._map
return self.identity
- def write(self, tr, st, now):
+ def write(self, tr, st):
if not self._use_dirstate_v2:
p1, p2 = self.parents()
- packed = self._map.write_v1(p1, p2, now)
+ packed = self._map.write_v1(p1, p2)
st.write(packed)
st.close()
self._dirtyparents = False
@@ -666,7 +666,7 @@
# We can only append to an existing data file if there is one
can_append = self.docket.uuid is not None
- packed, meta, append = self._map.write_v2(now, can_append)
+ packed, meta, append = self._map.write_v2(can_append)
if append:
docket = self.docket
data_filename = docket.data_filename()
--- a/mercurial/dirstateutils/timestamp.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/dirstateutils/timestamp.py Fri Feb 18 14:27:43 2022 +0100
@@ -6,8 +6,11 @@
from __future__ import absolute_import
import functools
+import os
import stat
+from .. import error
+
rangemask = 0x7FFFFFFF
@@ -18,40 +21,45 @@
A Unix timestamp with optional nanoseconds precision,
modulo 2**31 seconds.
- A 2-tuple containing:
+ A 3-tuple containing:
`truncated_seconds`: seconds since the Unix epoch,
truncated to its lower 31 bits
`subsecond_nanoseconds`: number of nanoseconds since `truncated_seconds`.
When this is zero, the sub-second precision is considered unknown.
+
+ `second_ambiguous`: whether this timestamp is still "reliable"
+ (see `reliable_mtime_of`) if we drop its sub-second component.
"""
def __new__(cls, value):
- truncated_seconds, subsec_nanos = value
- value = (truncated_seconds & rangemask, subsec_nanos)
+ truncated_seconds, subsec_nanos, second_ambiguous = value
+ value = (truncated_seconds & rangemask, subsec_nanos, second_ambiguous)
return super(timestamp, cls).__new__(cls, value)
def __eq__(self, other):
- self_secs, self_subsec_nanos = self
- other_secs, other_subsec_nanos = other
- return self_secs == other_secs and (
- self_subsec_nanos == other_subsec_nanos
- or self_subsec_nanos == 0
- or other_subsec_nanos == 0
+ raise error.ProgrammingError(
+ 'timestamp should never be compared directly'
)
def __gt__(self, other):
- self_secs, self_subsec_nanos = self
- other_secs, other_subsec_nanos = other
- if self_secs > other_secs:
- return True
- if self_secs < other_secs:
- return False
- if self_subsec_nanos == 0 or other_subsec_nanos == 0:
- # they are considered equal, so not "greater than"
- return False
- return self_subsec_nanos > other_subsec_nanos
+ raise error.ProgrammingError(
+ 'timestamp should never be compared directly'
+ )
+
+
+def get_fs_now(vfs):
+ """return a timestamp for "now" in the current vfs
+
+ This will raise an exception if no temporary files could be created.
+ """
+ tmpfd, tmpname = vfs.mkstemp()
+ try:
+ return mtime_of(os.fstat(tmpfd))
+ finally:
+ os.close(tmpfd)
+ vfs.unlink(tmpname)
def zero():
@@ -84,4 +92,37 @@
secs = nanos // billion
subsec_nanos = nanos % billion
- return timestamp((secs, subsec_nanos))
+ return timestamp((secs, subsec_nanos, False))
+
+
+def reliable_mtime_of(stat_result, present_mtime):
+ """Same as `mtime_of`, but return `None` or a `Timestamp` with
+ `second_ambiguous` set if the date might be ambiguous.
+
+ A modification time is reliable if it is older than "present_time" (or
+ sufficiently in the future).
+
+ Otherwise a concurrent modification might happen with the same mtime.
+ """
+ file_mtime = mtime_of(stat_result)
+ file_second = file_mtime[0]
+ file_ns = file_mtime[1]
+ boundary_second = present_mtime[0]
+ boundary_ns = present_mtime[1]
+ # If the mtime of the ambiguous file is younger (or equal) to the starting
+ # point of the `status` walk, we cannot guarantee that another, racy, write
+ # will not happen right after with the same mtime and we cannot cache the
+ # information.
+ #
+ # However if the mtime is far away in the future, this is likely some
+ # mismatch between the current clock and previous file system operation. So
+ # mtimes more than one day in the future are considered fine.
+ if boundary_second == file_second:
+ if file_ns and boundary_ns:
+ if file_ns < boundary_ns:
+ return timestamp((file_second, file_ns, True))
+ return None
+ elif boundary_second < file_second < (3600 * 24 + boundary_second):
+ return None
+ else:
+ return file_mtime
--- a/mercurial/dirstateutils/v2.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/dirstateutils/v2.py Fri Feb 18 14:27:43 2022 +0100
@@ -174,12 +174,10 @@
)
-def pack_dirstate(map, copy_map, now):
+def pack_dirstate(map, copy_map):
"""
Pack `map` and `copy_map` into the dirstate v2 binary format and return
the bytearray.
- `now` is a timestamp of the current filesystem time used to detect race
- conditions in writing the dirstate to disk, see inline comment.
The on-disk format expects a tree-like structure where the leaves are
written first (and sorted per-directory), going up levels until the root
@@ -284,17 +282,6 @@
stack.append(current_node)
for index, (path, entry) in enumerate(sorted_map, 1):
- if entry.need_delay(now):
- # The file was last modified "simultaneously" with the current
- # write to dirstate (i.e. within the same second for file-
- # systems with a granularity of 1 sec). This commonly happens
- # for at least a couple of files on 'update'.
- # The user could change the file without changing its size
- # within the same second. Invalidate the file's mtime in
- # dirstate, forcing future 'status' calls to compare the
- # contents of the file if the size is the same. This prevents
- # mistakenly treating such files as clean.
- entry.set_possibly_dirty()
nodes_with_entry_count += 1
if path in copy_map:
nodes_with_copy_source_count += 1
--- a/mercurial/discovery.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/discovery.py Fri Feb 18 14:27:43 2022 +0100
@@ -19,6 +19,7 @@
bookmarks,
branchmap,
error,
+ obsolete,
phases,
pycompat,
scmutil,
@@ -141,17 +142,6 @@
self._computecommonmissing()
return self._missing
- @property
- def missingheads(self):
- util.nouideprecwarn(
- b'outgoing.missingheads never contained what the name suggests and '
- b'was renamed to outgoing.ancestorsof. check your code for '
- b'correctness.',
- b'5.5',
- stacklevel=2,
- )
- return self.ancestorsof
-
def findcommonoutgoing(
repo, other, onlyheads=None, force=False, commoninc=None, portable=False
@@ -556,12 +546,16 @@
if len(localcandidate) == 1:
return unknownheads | set(candidate_newhs), set()
+ obsrevs = obsolete.getrevs(unfi, b'obsolete')
+ futurenonobsolete = frozenset(futurecommon) - obsrevs
+
# actually process branch replacement
while localcandidate:
nh = localcandidate.pop()
+ r = torev(nh)
current_branch = unfi[nh].branch()
# run this check early to skip the evaluation of the whole branch
- if torev(nh) in futurecommon or ispublic(torev(nh)):
+ if ispublic(r) or r not in obsrevs:
newhs.add(nh)
continue
@@ -583,7 +577,7 @@
# * if we have no markers to push to obsolete it.
if (
any(ispublic(r) for r in branchrevs)
- or any(torev(n) in futurecommon for n in branchnodes)
+ or any(torev(n) in futurenonobsolete for n in branchnodes)
or any(not hasoutmarker(n) for n in branchnodes)
):
newhs.add(nh)
--- a/mercurial/encoding.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/encoding.py Fri Feb 18 14:27:43 2022 +0100
@@ -511,17 +511,21 @@
if width <= 0: # no enough room even for ellipsis
return ellipsis[: width + len(ellipsis)]
+ chars = list(u)
if leftside:
- uslice = lambda i: u[i:]
- concat = lambda s: ellipsis + s
- else:
- uslice = lambda i: u[:-i]
- concat = lambda s: s + ellipsis
- for i in pycompat.xrange(1, len(u)):
- usub = uslice(i)
- if ucolwidth(usub) <= width:
- return concat(usub.encode(_sysstr(encoding)))
- return ellipsis # no enough room for multi-column characters
+ chars.reverse()
+ width_so_far = 0
+ for i, c in enumerate(chars):
+ width_so_far += ucolwidth(c)
+ if width_so_far > width:
+ break
+ chars = chars[:i]
+ if leftside:
+ chars.reverse()
+ u = u''.join(chars).encode(_sysstr(encoding))
+ if leftside:
+ return ellipsis + u
+ return u + ellipsis
class normcasespecs(object):
--- a/mercurial/error.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/error.py Fri Feb 18 14:27:43 2022 +0100
@@ -388,6 +388,14 @@
__bytes__ = _tobytes
+class PatchParseError(PatchError):
+ __bytes__ = _tobytes
+
+
+class PatchApplicationError(PatchError):
+ __bytes__ = _tobytes
+
+
def getsimilar(symbols, value):
# type: (Iterable[bytes], bytes) -> List[bytes]
sim = lambda x: difflib.SequenceMatcher(None, value, x).ratio()
--- a/mercurial/exchange.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/exchange.py Fri Feb 18 14:27:43 2022 +0100
@@ -22,7 +22,6 @@
changegroup,
discovery,
error,
- exchangev2,
lock as lockmod,
logexchange,
narrowspec,
@@ -522,8 +521,16 @@
def _checksubrepostate(pushop):
"""Ensure all outgoing referenced subrepo revisions are present locally"""
+
+ repo = pushop.repo
+
+ # If the repository does not use subrepos, skip the expensive
+ # manifest checks.
+ if not len(repo.file(b'.hgsub')) or not len(repo.file(b'.hgsubstate')):
+ return
+
for n in pushop.outgoing.missing:
- ctx = pushop.repo[n]
+ ctx = repo[n]
if b'.hgsub' in ctx.manifest() and b'.hgsubstate' in ctx.files():
for subpath in sorted(ctx.substate):
@@ -1666,21 +1673,17 @@
):
add_confirm_callback(repo, pullop)
- # Use the modern wire protocol, if available.
- if remote.capable(b'command-changesetdata'):
- exchangev2.pull(pullop)
- else:
- # This should ideally be in _pullbundle2(). However, it needs to run
- # before discovery to avoid extra work.
- _maybeapplyclonebundle(pullop)
- streamclone.maybeperformlegacystreamclone(pullop)
- _pulldiscovery(pullop)
- if pullop.canusebundle2:
- _fullpullbundle2(repo, pullop)
- _pullchangeset(pullop)
- _pullphase(pullop)
- _pullbookmarks(pullop)
- _pullobsolete(pullop)
+ # This should ideally be in _pullbundle2(). However, it needs to run
+ # before discovery to avoid extra work.
+ _maybeapplyclonebundle(pullop)
+ streamclone.maybeperformlegacystreamclone(pullop)
+ _pulldiscovery(pullop)
+ if pullop.canusebundle2:
+ _fullpullbundle2(repo, pullop)
+ _pullchangeset(pullop)
+ _pullphase(pullop)
+ _pullbookmarks(pullop)
+ _pullobsolete(pullop)
# storing remotenames
if repo.ui.configbool(b'experimental', b'remotenames'):
--- a/mercurial/exchangev2.py Fri Feb 18 12:55:39 2022 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,804 +0,0 @@
-# exchangev2.py - repository exchange for wire protocol version 2
-#
-# Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-from __future__ import absolute_import
-
-import collections
-import weakref
-
-from .i18n import _
-from .node import short
-from . import (
- bookmarks,
- error,
- mdiff,
- narrowspec,
- phases,
- pycompat,
- requirements as requirementsmod,
- setdiscovery,
-)
-from .interfaces import repository
-
-
-def pull(pullop):
- """Pull using wire protocol version 2."""
- repo = pullop.repo
- remote = pullop.remote
-
- usingrawchangelogandmanifest = _checkuserawstorefiledata(pullop)
-
- # If this is a clone and it was requested to perform a "stream clone",
- # we obtain the raw files data from the remote then fall back to an
- # incremental pull. This is somewhat hacky and is not nearly robust enough
- # for long-term usage.
- if usingrawchangelogandmanifest:
- with repo.transaction(b'clone'):
- _fetchrawstorefiles(repo, remote)
- repo.invalidate(clearfilecache=True)
-
- tr = pullop.trmanager.transaction()
-
- # We don't use the repo's narrow matcher here because the patterns passed
- # to exchange.pull() could be different.
- narrowmatcher = narrowspec.match(
- repo.root,
- # Empty maps to nevermatcher. So always
- # set includes if missing.
- pullop.includepats or {b'path:.'},
- pullop.excludepats,
- )
-
- if pullop.includepats or pullop.excludepats:
- pathfilter = {}
- if pullop.includepats:
- pathfilter[b'include'] = sorted(pullop.includepats)
- if pullop.excludepats:
- pathfilter[b'exclude'] = sorted(pullop.excludepats)
- else:
- pathfilter = None
-
- # Figure out what needs to be fetched.
- common, fetch, remoteheads = _pullchangesetdiscovery(
- repo, remote, pullop.heads, abortwhenunrelated=pullop.force
- )
-
- # And fetch the data.
- pullheads = pullop.heads or remoteheads
- csetres = _fetchchangesets(repo, tr, remote, common, fetch, pullheads)
-
- # New revisions are written to the changelog. But all other updates
- # are deferred. Do those now.
-
- # Ensure all new changesets are draft by default. If the repo is
- # publishing, the phase will be adjusted by the loop below.
- if csetres[b'added']:
- phases.registernew(
- repo, tr, phases.draft, [repo[n].rev() for n in csetres[b'added']]
- )
-
- # And adjust the phase of all changesets accordingly.
- for phasenumber, phase in phases.phasenames.items():
- if phase == b'secret' or not csetres[b'nodesbyphase'][phase]:
- continue
-
- phases.advanceboundary(
- repo,
- tr,
- phasenumber,
- csetres[b'nodesbyphase'][phase],
- )
-
- # Write bookmark updates.
- bookmarks.updatefromremote(
- repo.ui,
- repo,
- csetres[b'bookmarks'],
- remote.url(),
- pullop.gettransaction,
- explicit=pullop.explicitbookmarks,
- )
-
- manres = _fetchmanifests(repo, tr, remote, csetres[b'manifestnodes'])
-
- # We don't properly support shallow changeset and manifest yet. So we apply
- # depth limiting locally.
- if pullop.depth:
- relevantcsetnodes = set()
- clnode = repo.changelog.node
-
- for rev in repo.revs(
- b'ancestors(%ln, %s)', pullheads, pullop.depth - 1
- ):
- relevantcsetnodes.add(clnode(rev))
-
- csetrelevantfilter = lambda n: n in relevantcsetnodes
-
- else:
- csetrelevantfilter = lambda n: True
-
- # If obtaining the raw store files, we need to scan the full repo to
- # derive all the changesets, manifests, and linkrevs.
- if usingrawchangelogandmanifest:
- csetsforfiles = []
- mnodesforfiles = []
- manifestlinkrevs = {}
-
- for rev in repo:
- ctx = repo[rev]
- node = ctx.node()
-
- if not csetrelevantfilter(node):
- continue
-
- mnode = ctx.manifestnode()
-
- csetsforfiles.append(node)
- mnodesforfiles.append(mnode)
- manifestlinkrevs[mnode] = rev
-
- else:
- csetsforfiles = [n for n in csetres[b'added'] if csetrelevantfilter(n)]
- mnodesforfiles = manres[b'added']
- manifestlinkrevs = manres[b'linkrevs']
-
- # Find all file nodes referenced by added manifests and fetch those
- # revisions.
- fnodes = _derivefilesfrommanifests(repo, narrowmatcher, mnodesforfiles)
- _fetchfilesfromcsets(
- repo,
- tr,
- remote,
- pathfilter,
- fnodes,
- csetsforfiles,
- manifestlinkrevs,
- shallow=bool(pullop.depth),
- )
-
-
-def _checkuserawstorefiledata(pullop):
- """Check whether we should use rawstorefiledata command to retrieve data."""
-
- repo = pullop.repo
- remote = pullop.remote
-
- # Command to obtain raw store data isn't available.
- if b'rawstorefiledata' not in remote.apidescriptor[b'commands']:
- return False
-
- # Only honor if user requested stream clone operation.
- if not pullop.streamclonerequested:
- return False
-
- # Only works on empty repos.
- if len(repo):
- return False
-
- # TODO This is super hacky. There needs to be a storage API for this. We
- # also need to check for compatibility with the remote.
- if requirementsmod.REVLOGV1_REQUIREMENT not in repo.requirements:
- return False
-
- return True
-
-
-def _fetchrawstorefiles(repo, remote):
- with remote.commandexecutor() as e:
- objs = e.callcommand(
- b'rawstorefiledata',
- {
- b'files': [b'changelog', b'manifestlog'],
- },
- ).result()
-
- # First object is a summary of files data that follows.
- overall = next(objs)
-
- progress = repo.ui.makeprogress(
- _(b'clone'), total=overall[b'totalsize'], unit=_(b'bytes')
- )
- with progress:
- progress.update(0)
-
- # Next are pairs of file metadata, data.
- while True:
- try:
- filemeta = next(objs)
- except StopIteration:
- break
-
- for k in (b'location', b'path', b'size'):
- if k not in filemeta:
- raise error.Abort(
- _(b'remote file data missing key: %s') % k
- )
-
- if filemeta[b'location'] == b'store':
- vfs = repo.svfs
- else:
- raise error.Abort(
- _(b'invalid location for raw file data: %s')
- % filemeta[b'location']
- )
-
- bytesremaining = filemeta[b'size']
-
- with vfs.open(filemeta[b'path'], b'wb') as fh:
- while True:
- try:
- chunk = next(objs)
- except StopIteration:
- break
-
- bytesremaining -= len(chunk)
-
- if bytesremaining < 0:
- raise error.Abort(
- _(
- b'received invalid number of bytes for file '
- b'data; expected %d, got extra'
- )
- % filemeta[b'size']
- )
-
- progress.increment(step=len(chunk))
- fh.write(chunk)
-
- try:
- if chunk.islast:
- break
- except AttributeError:
- raise error.Abort(
- _(
- b'did not receive indefinite length bytestring '
- b'for file data'
- )
- )
-
- if bytesremaining:
- raise error.Abort(
- _(
- b'received invalid number of bytes for'
- b'file data; expected %d got %d'
- )
- % (
- filemeta[b'size'],
- filemeta[b'size'] - bytesremaining,
- )
- )
-
-
-def _pullchangesetdiscovery(repo, remote, heads, abortwhenunrelated=True):
- """Determine which changesets need to be pulled."""
-
- if heads:
- knownnode = repo.changelog.hasnode
- if all(knownnode(head) for head in heads):
- return heads, False, heads
-
- # TODO wire protocol version 2 is capable of more efficient discovery
- # than setdiscovery. Consider implementing something better.
- common, fetch, remoteheads = setdiscovery.findcommonheads(
- repo.ui, repo, remote, abortwhenunrelated=abortwhenunrelated
- )
-
- common = set(common)
- remoteheads = set(remoteheads)
-
- # If a remote head is filtered locally, put it back in the common set.
- # See the comment in exchange._pulldiscoverychangegroup() for more.
-
- if fetch and remoteheads:
- has_node = repo.unfiltered().changelog.index.has_node
-
- common |= {head for head in remoteheads if has_node(head)}
-
- if set(remoteheads).issubset(common):
- fetch = []
-
- common.discard(repo.nullid)
-
- return common, fetch, remoteheads
-
-
-def _fetchchangesets(repo, tr, remote, common, fetch, remoteheads):
- # TODO consider adding a step here where we obtain the DAG shape first
- # (or ask the server to slice changesets into chunks for us) so that
- # we can perform multiple fetches in batches. This will facilitate
- # resuming interrupted clones, higher server-side cache hit rates due
- # to smaller segments, etc.
- with remote.commandexecutor() as e:
- objs = e.callcommand(
- b'changesetdata',
- {
- b'revisions': [
- {
- b'type': b'changesetdagrange',
- b'roots': sorted(common),
- b'heads': sorted(remoteheads),
- }
- ],
- b'fields': {b'bookmarks', b'parents', b'phase', b'revision'},
- },
- ).result()
-
- # The context manager waits on all response data when exiting. So
- # we need to remain in the context manager in order to stream data.
- return _processchangesetdata(repo, tr, objs)
-
-
-def _processchangesetdata(repo, tr, objs):
- repo.hook(b'prechangegroup', throw=True, **pycompat.strkwargs(tr.hookargs))
-
- urepo = repo.unfiltered()
- cl = urepo.changelog
-
- cl.delayupdate(tr)
-
- # The first emitted object is a header describing the data that
- # follows.
- meta = next(objs)
-
- progress = repo.ui.makeprogress(
- _(b'changesets'), unit=_(b'chunks'), total=meta.get(b'totalitems')
- )
-
- manifestnodes = {}
- added = []
-
- def linkrev(node):
- repo.ui.debug(b'add changeset %s\n' % short(node))
- # Linkrev for changelog is always self.
- return len(cl)
-
- def ondupchangeset(cl, rev):
- added.append(cl.node(rev))
-
- def onchangeset(cl, rev):
- progress.increment()
-
- revision = cl.changelogrevision(rev)
- added.append(cl.node(rev))
-
- # We need to preserve the mapping of changelog revision to node
- # so we can set the linkrev accordingly when manifests are added.
- manifestnodes[rev] = revision.manifest
-
- repo.register_changeset(rev, revision)
-
- nodesbyphase = {phase: set() for phase in phases.phasenames.values()}
- remotebookmarks = {}
-
- # addgroup() expects a 7-tuple describing revisions. This normalizes
- # the wire data to that format.
- #
- # This loop also aggregates non-revision metadata, such as phase
- # data.
- def iterrevisions():
- for cset in objs:
- node = cset[b'node']
-
- if b'phase' in cset:
- nodesbyphase[cset[b'phase']].add(node)
-
- for mark in cset.get(b'bookmarks', []):
- remotebookmarks[mark] = node
-
- # TODO add mechanism for extensions to examine records so they
- # can siphon off custom data fields.
-
- extrafields = {}
-
- for field, size in cset.get(b'fieldsfollowing', []):
- extrafields[field] = next(objs)
-
- # Some entries might only be metadata only updates.
- if b'revision' not in extrafields:
- continue
-
- data = extrafields[b'revision']
-
- yield (
- node,
- cset[b'parents'][0],
- cset[b'parents'][1],
- # Linknode is always itself for changesets.
- cset[b'node'],
- # We always send full revisions. So delta base is not set.
- repo.nullid,
- mdiff.trivialdiffheader(len(data)) + data,
- # Flags not yet supported.
- 0,
- # Sidedata not yet supported
- {},
- )
-
- cl.addgroup(
- iterrevisions(),
- linkrev,
- weakref.proxy(tr),
- alwayscache=True,
- addrevisioncb=onchangeset,
- duplicaterevisioncb=ondupchangeset,
- )
-
- progress.complete()
-
- return {
- b'added': added,
- b'nodesbyphase': nodesbyphase,
- b'bookmarks': remotebookmarks,
- b'manifestnodes': manifestnodes,
- }
-
-
-def _fetchmanifests(repo, tr, remote, manifestnodes):
- rootmanifest = repo.manifestlog.getstorage(b'')
-
- # Some manifests can be shared between changesets. Filter out revisions
- # we already know about.
- fetchnodes = []
- linkrevs = {}
- seen = set()
-
- for clrev, node in sorted(pycompat.iteritems(manifestnodes)):
- if node in seen:
- continue
-
- try:
- rootmanifest.rev(node)
- except error.LookupError:
- fetchnodes.append(node)
- linkrevs[node] = clrev
-
- seen.add(node)
-
- # TODO handle tree manifests
-
- # addgroup() expects 7-tuple describing revisions. This normalizes
- # the wire data to that format.
- def iterrevisions(objs, progress):
- for manifest in objs:
- node = manifest[b'node']
-
- extrafields = {}
-
- for field, size in manifest.get(b'fieldsfollowing', []):
- extrafields[field] = next(objs)
-
- if b'delta' in extrafields:
- basenode = manifest[b'deltabasenode']
- delta = extrafields[b'delta']
- elif b'revision' in extrafields:
- basenode = repo.nullid
- revision = extrafields[b'revision']
- delta = mdiff.trivialdiffheader(len(revision)) + revision
- else:
- continue
-
- yield (
- node,
- manifest[b'parents'][0],
- manifest[b'parents'][1],
- # The value passed in is passed to the lookup function passed
- # to addgroup(). We already have a map of manifest node to
- # changelog revision number. So we just pass in the
- # manifest node here and use linkrevs.__getitem__ as the
- # resolution function.
- node,
- basenode,
- delta,
- # Flags not yet supported.
- 0,
- # Sidedata not yet supported.
- {},
- )
-
- progress.increment()
-
- progress = repo.ui.makeprogress(
- _(b'manifests'), unit=_(b'chunks'), total=len(fetchnodes)
- )
-
- commandmeta = remote.apidescriptor[b'commands'][b'manifestdata']
- batchsize = commandmeta.get(b'recommendedbatchsize', 10000)
- # TODO make size configurable on client?
-
- # We send commands 1 at a time to the remote. This is not the most
- # efficient because we incur a round trip at the end of each batch.
- # However, the existing frame-based reactor keeps consuming server
- # data in the background. And this results in response data buffering
- # in memory. This can consume gigabytes of memory.
- # TODO send multiple commands in a request once background buffering
- # issues are resolved.
-
- added = []
-
- for i in pycompat.xrange(0, len(fetchnodes), batchsize):
- batch = [node for node in fetchnodes[i : i + batchsize]]
- if not batch:
- continue
-
- with remote.commandexecutor() as e:
- objs = e.callcommand(
- b'manifestdata',
- {
- b'tree': b'',
- b'nodes': batch,
- b'fields': {b'parents', b'revision'},
- b'haveparents': True,
- },
- ).result()
-
- # Chomp off header object.
- next(objs)
-
- def onchangeset(cl, rev):
- added.append(cl.node(rev))
-
- rootmanifest.addgroup(
- iterrevisions(objs, progress),
- linkrevs.__getitem__,
- weakref.proxy(tr),
- addrevisioncb=onchangeset,
- duplicaterevisioncb=onchangeset,
- )
-
- progress.complete()
-
- return {
- b'added': added,
- b'linkrevs': linkrevs,
- }
-
-
-def _derivefilesfrommanifests(repo, matcher, manifestnodes):
- """Determine what file nodes are relevant given a set of manifest nodes.
-
- Returns a dict mapping file paths to dicts of file node to first manifest
- node.
- """
- ml = repo.manifestlog
- fnodes = collections.defaultdict(dict)
-
- progress = repo.ui.makeprogress(
- _(b'scanning manifests'), total=len(manifestnodes)
- )
-
- with progress:
- for manifestnode in manifestnodes:
- m = ml.get(b'', manifestnode)
-
- # TODO this will pull in unwanted nodes because it takes the storage
- # delta into consideration. What we really want is something that
- # takes the delta between the manifest's parents. And ideally we
- # would ignore file nodes that are known locally. For now, ignore
- # both these limitations. This will result in incremental fetches
- # requesting data we already have. So this is far from ideal.
- md = m.readfast()
-
- for path, fnode in md.items():
- if matcher(path):
- fnodes[path].setdefault(fnode, manifestnode)
-
- progress.increment()
-
- return fnodes
-
-
-def _fetchfiles(repo, tr, remote, fnodes, linkrevs):
- """Fetch file data from explicit file revisions."""
-
- def iterrevisions(objs, progress):
- for filerevision in objs:
- node = filerevision[b'node']
-
- extrafields = {}
-
- for field, size in filerevision.get(b'fieldsfollowing', []):
- extrafields[field] = next(objs)
-
- if b'delta' in extrafields:
- basenode = filerevision[b'deltabasenode']
- delta = extrafields[b'delta']
- elif b'revision' in extrafields:
- basenode = repo.nullid
- revision = extrafields[b'revision']
- delta = mdiff.trivialdiffheader(len(revision)) + revision
- else:
- continue
-
- yield (
- node,
- filerevision[b'parents'][0],
- filerevision[b'parents'][1],
- node,
- basenode,
- delta,
- # Flags not yet supported.
- 0,
- # Sidedata not yet supported.
- {},
- )
-
- progress.increment()
-
- progress = repo.ui.makeprogress(
- _(b'files'),
- unit=_(b'chunks'),
- total=sum(len(v) for v in pycompat.itervalues(fnodes)),
- )
-
- # TODO make batch size configurable
- batchsize = 10000
- fnodeslist = [x for x in sorted(fnodes.items())]
-
- for i in pycompat.xrange(0, len(fnodeslist), batchsize):
- batch = [x for x in fnodeslist[i : i + batchsize]]
- if not batch:
- continue
-
- with remote.commandexecutor() as e:
- fs = []
- locallinkrevs = {}
-
- for path, nodes in batch:
- fs.append(
- (
- path,
- e.callcommand(
- b'filedata',
- {
- b'path': path,
- b'nodes': sorted(nodes),
- b'fields': {b'parents', b'revision'},
- b'haveparents': True,
- },
- ),
- )
- )
-
- locallinkrevs[path] = {
- node: linkrevs[manifestnode]
- for node, manifestnode in pycompat.iteritems(nodes)
- }
-
- for path, f in fs:
- objs = f.result()
-
- # Chomp off header objects.
- next(objs)
-
- store = repo.file(path)
- store.addgroup(
- iterrevisions(objs, progress),
- locallinkrevs[path].__getitem__,
- weakref.proxy(tr),
- )
-
-
-def _fetchfilesfromcsets(
- repo, tr, remote, pathfilter, fnodes, csets, manlinkrevs, shallow=False
-):
- """Fetch file data from explicit changeset revisions."""
-
- def iterrevisions(objs, remaining, progress):
- while remaining:
- filerevision = next(objs)
-
- node = filerevision[b'node']
-
- extrafields = {}
-
- for field, size in filerevision.get(b'fieldsfollowing', []):
- extrafields[field] = next(objs)
-
- if b'delta' in extrafields:
- basenode = filerevision[b'deltabasenode']
- delta = extrafields[b'delta']
- elif b'revision' in extrafields:
- basenode = repo.nullid
- revision = extrafields[b'revision']
- delta = mdiff.trivialdiffheader(len(revision)) + revision
- else:
- continue
-
- if b'linknode' in filerevision:
- linknode = filerevision[b'linknode']
- else:
- linknode = node
-
- yield (
- node,
- filerevision[b'parents'][0],
- filerevision[b'parents'][1],
- linknode,
- basenode,
- delta,
- # Flags not yet supported.
- 0,
- # Sidedata not yet supported.
- {},
- )
-
- progress.increment()
- remaining -= 1
-
- progress = repo.ui.makeprogress(
- _(b'files'),
- unit=_(b'chunks'),
- total=sum(len(v) for v in pycompat.itervalues(fnodes)),
- )
-
- commandmeta = remote.apidescriptor[b'commands'][b'filesdata']
- batchsize = commandmeta.get(b'recommendedbatchsize', 50000)
-
- shallowfiles = repository.REPO_FEATURE_SHALLOW_FILE_STORAGE in repo.features
- fields = {b'parents', b'revision'}
- clrev = repo.changelog.rev
-
- # There are no guarantees that we'll have ancestor revisions if
- # a) this repo has shallow file storage b) shallow data fetching is enabled.
- # Force remote to not delta against possibly unknown revisions when these
- # conditions hold.
- haveparents = not (shallowfiles or shallow)
-
- # Similarly, we may not have calculated linkrevs for all incoming file
- # revisions. Ask the remote to do work for us in this case.
- if not haveparents:
- fields.add(b'linknode')
-
- for i in pycompat.xrange(0, len(csets), batchsize):
- batch = [x for x in csets[i : i + batchsize]]
- if not batch:
- continue
-
- with remote.commandexecutor() as e:
- args = {
- b'revisions': [
- {
- b'type': b'changesetexplicit',
- b'nodes': batch,
- }
- ],
- b'fields': fields,
- b'haveparents': haveparents,
- }
-
- if pathfilter:
- args[b'pathfilter'] = pathfilter
-
- objs = e.callcommand(b'filesdata', args).result()
-
- # First object is an overall header.
- overall = next(objs)
-
- # We have overall['totalpaths'] segments.
- for i in pycompat.xrange(overall[b'totalpaths']):
- header = next(objs)
-
- path = header[b'path']
- store = repo.file(path)
-
- linkrevs = {
- fnode: manlinkrevs[mnode]
- for fnode, mnode in pycompat.iteritems(fnodes[path])
- }
-
- def getlinkrev(node):
- if node in linkrevs:
- return linkrevs[node]
- else:
- return clrev(node)
-
- store.addgroup(
- iterrevisions(objs, header[b'totalitems'], progress),
- getlinkrev,
- weakref.proxy(tr),
- maybemissingparents=shallow,
- )
--- a/mercurial/extensions.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/extensions.py Fri Feb 18 14:27:43 2022 +0100
@@ -282,6 +282,7 @@
result = ui.configitems(b"extensions")
if whitelist is not None:
result = [(k, v) for (k, v) in result if k in whitelist]
+ result = [(k, v) for (k, v) in result if b':' not in k]
newindex = len(_order)
ui.log(
b'extension',
@@ -290,6 +291,8 @@
)
ui.log(b'extension', b'- processing %d entries\n', len(result))
with util.timedcm('load all extensions') as stats:
+ default_sub_options = ui.configsuboptions(b"extensions", b"*")[1]
+
for (name, path) in result:
if path:
if path[0:1] == b'!':
@@ -306,18 +309,32 @@
except Exception as inst:
msg = stringutil.forcebytestr(inst)
if path:
- ui.warn(
- _(b"*** failed to import extension %s from %s: %s\n")
- % (name, path, msg)
+ error_msg = _(
+ b'failed to import extension "%s" from %s: %s'
)
+ error_msg %= (name, path, msg)
else:
- ui.warn(
- _(b"*** failed to import extension %s: %s\n")
- % (name, msg)
- )
- if isinstance(inst, error.Hint) and inst.hint:
- ui.warn(_(b"*** (%s)\n") % inst.hint)
- ui.traceback()
+ error_msg = _(b'failed to import extension "%s": %s')
+ error_msg %= (name, msg)
+
+ options = default_sub_options.copy()
+ ext_options = ui.configsuboptions(b"extensions", name)[1]
+ options.update(ext_options)
+ if stringutil.parsebool(options.get(b"required", b'no')):
+ hint = None
+ if isinstance(inst, error.Hint) and inst.hint:
+ hint = inst.hint
+ if hint is None:
+ hint = _(
+ b"loading of this extension was required, "
+ b"see `hg help config.extensions` for details"
+ )
+ raise error.Abort(error_msg, hint=hint)
+ else:
+ ui.warn((b"*** %s\n") % error_msg)
+ if isinstance(inst, error.Hint) and inst.hint:
+ ui.warn(_(b"*** (%s)\n") % inst.hint)
+ ui.traceback()
ui.log(
b'extension',
--- a/mercurial/filelog.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/filelog.py Fri Feb 18 14:27:43 2022 +0100
@@ -97,8 +97,8 @@
def iscensored(self, rev):
return self._revlog.iscensored(rev)
- def revision(self, node, _df=None, raw=False):
- return self._revlog.revision(node, _df=_df, raw=raw)
+ def revision(self, node, _df=None):
+ return self._revlog.revision(node, _df=_df)
def rawdata(self, node, _df=None):
return self._revlog.rawdata(node, _df=_df)
--- a/mercurial/filemerge.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/filemerge.py Fri Feb 18 14:27:43 2022 +0100
@@ -19,7 +19,6 @@
)
from .pycompat import (
getattr,
- open,
)
from . import (
@@ -293,9 +292,9 @@
return None # unknown
-def _matcheol(file, back):
+def _matcheol(file, backup):
"""Convert EOL markers in a file to match origfile"""
- tostyle = _eoltype(back.data()) # No repo.wread filters?
+ tostyle = _eoltype(backup.data()) # No repo.wread filters?
if tostyle:
data = util.readfile(file)
style = _eoltype(data)
@@ -306,27 +305,27 @@
@internaltool(b'prompt', nomerge)
-def _iprompt(repo, mynode, orig, fcd, fco, fca, toolconf, labels=None):
+def _iprompt(repo, mynode, local, other, base, toolconf):
"""Asks the user which of the local `p1()` or the other `p2()` version to
keep as the merged version."""
ui = repo.ui
- fd = fcd.path()
+ fd = local.fctx.path()
uipathfn = scmutil.getuipathfn(repo)
# Avoid prompting during an in-memory merge since it doesn't support merge
# conflicts.
- if fcd.changectx().isinmemory():
+ if local.fctx.changectx().isinmemory():
raise error.InMemoryMergeConflictsError(
b'in-memory merge does not support file conflicts'
)
- prompts = partextras(labels)
+ prompts = partextras([local.label, other.label])
prompts[b'fd'] = uipathfn(fd)
try:
- if fco.isabsent():
+ if other.fctx.isabsent():
index = ui.promptchoice(_localchangedotherdeletedmsg % prompts, 2)
choice = [b'local', b'other', b'unresolved'][index]
- elif fcd.isabsent():
+ elif local.fctx.isabsent():
index = ui.promptchoice(_otherchangedlocaldeletedmsg % prompts, 2)
choice = [b'other', b'local', b'unresolved'][index]
else:
@@ -347,44 +346,48 @@
choice = [b'local', b'other', b'unresolved'][index]
if choice == b'other':
- return _iother(repo, mynode, orig, fcd, fco, fca, toolconf, labels)
+ return _iother(repo, mynode, local, other, base, toolconf)
elif choice == b'local':
- return _ilocal(repo, mynode, orig, fcd, fco, fca, toolconf, labels)
+ return _ilocal(repo, mynode, local, other, base, toolconf)
elif choice == b'unresolved':
- return _ifail(repo, mynode, orig, fcd, fco, fca, toolconf, labels)
+ return _ifail(repo, mynode, local, other, base, toolconf)
except error.ResponseExpected:
ui.write(b"\n")
- return _ifail(repo, mynode, orig, fcd, fco, fca, toolconf, labels)
+ return _ifail(repo, mynode, local, other, base, toolconf)
@internaltool(b'local', nomerge)
-def _ilocal(repo, mynode, orig, fcd, fco, fca, toolconf, labels=None):
+def _ilocal(repo, mynode, local, other, base, toolconf):
"""Uses the local `p1()` version of files as the merged version."""
- return 0, fcd.isabsent()
+ return 0, local.fctx.isabsent()
@internaltool(b'other', nomerge)
-def _iother(repo, mynode, orig, fcd, fco, fca, toolconf, labels=None):
+def _iother(repo, mynode, local, other, base, toolconf):
"""Uses the other `p2()` version of files as the merged version."""
- if fco.isabsent():
+ if other.fctx.isabsent():
# local changed, remote deleted -- 'deleted' picked
- _underlyingfctxifabsent(fcd).remove()
+ _underlyingfctxifabsent(local.fctx).remove()
deleted = True
else:
- _underlyingfctxifabsent(fcd).write(fco.data(), fco.flags())
+ _underlyingfctxifabsent(local.fctx).write(
+ other.fctx.data(), other.fctx.flags()
+ )
deleted = False
return 0, deleted
@internaltool(b'fail', nomerge)
-def _ifail(repo, mynode, orig, fcd, fco, fca, toolconf, labels=None):
+def _ifail(repo, mynode, local, other, base, toolconf):
"""
Rather than attempting to merge files that were modified on both
branches, it marks them as unresolved. The resolve command must be
used to resolve these conflicts."""
# for change/delete conflicts write out the changed version, then fail
- if fcd.isabsent():
- _underlyingfctxifabsent(fcd).write(fco.data(), fco.flags())
+ if local.fctx.isabsent():
+ _underlyingfctxifabsent(local.fctx).write(
+ other.fctx.data(), other.fctx.flags()
+ )
return 1, False
@@ -399,11 +402,18 @@
return filectx
-def _premerge(repo, fcd, fco, fca, toolconf, files, labels=None):
+def _verifytext(input, ui):
+ """verifies that text is non-binary"""
+ if stringutil.binary(input.text()):
+ msg = _(b"%s looks like a binary file.") % input.fctx.path()
+ ui.warn(_(b'warning: %s\n') % msg)
+ raise error.Abort(msg)
+
+
+def _premerge(repo, local, other, base, toolconf):
tool, toolpath, binary, symlink, scriptfn = toolconf
- if symlink or fcd.isabsent() or fco.isabsent():
+ if symlink or local.fctx.isabsent() or other.fctx.isabsent():
return 1
- unused, unused, unused, back = files
ui = repo.ui
@@ -423,26 +433,28 @@
if premerge:
mode = b'merge'
- if premerge in {b'keep-merge3', b'keep-mergediff'}:
- if not labels:
- labels = _defaultconflictlabels
- if len(labels) < 3:
- labels.append(b'base')
- if premerge == b'keep-mergediff':
- mode = b'mergediff'
- r = simplemerge.simplemerge(
- ui, fcd, fca, fco, quiet=True, label=labels, mode=mode
+ if premerge == b'keep-mergediff':
+ mode = b'mergediff'
+ elif premerge == b'keep-merge3':
+ mode = b'merge3'
+ if any(
+ stringutil.binary(input.text()) for input in (local, base, other)
+ ):
+ return 1 # continue merging
+ merged_text, conflicts = simplemerge.simplemerge(
+ local, base, other, mode=mode
)
- if not r:
+ if not conflicts or premerge in validkeep:
+ # fcd.flags() already has the merged flags (done in
+ # mergestate.resolve())
+ local.fctx.write(merged_text, local.fctx.flags())
+ if not conflicts:
ui.debug(b" premerge successful\n")
return 0
- if premerge not in validkeep:
- # restore from backup and try again
- _restorebackup(fcd, back)
return 1 # continue merging
-def _mergecheck(repo, mynode, orig, fcd, fco, fca, toolconf):
+def _mergecheck(repo, mynode, fcd, fco, fca, toolconf):
tool, toolpath, binary, symlink, scriptfn = toolconf
uipathfn = scmutil.getuipathfn(repo)
if symlink:
@@ -463,7 +475,7 @@
return True
-def _merge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels, mode):
+def _merge(repo, local, other, base, mode):
"""
Uses the internal non-interactive simple merge algorithm for merging
files. It will fail if there are any conflicts and leave markers in
@@ -471,8 +483,20 @@
of merge, unless mode equals 'union' which suppresses the markers."""
ui = repo.ui
- r = simplemerge.simplemerge(ui, fcd, fca, fco, label=labels, mode=mode)
- return True, r, False
+ try:
+ _verifytext(local, ui)
+ _verifytext(base, ui)
+ _verifytext(other, ui)
+ except error.Abort:
+ return True, True, False
+ else:
+ merged_text, conflicts = simplemerge.simplemerge(
+ local, base, other, mode=mode
+ )
+ # fcd.flags() already has the merged flags (done in
+ # mergestate.resolve())
+ local.fctx.write(merged_text, local.fctx.flags())
+ return True, conflicts, False
@internaltool(
@@ -484,14 +508,12 @@
),
precheck=_mergecheck,
)
-def _iunion(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
+def _iunion(repo, mynode, local, other, base, toolconf, backup):
"""
Uses the internal non-interactive simple merge algorithm for merging
files. It will use both left and right sides for conflict regions.
No markers are inserted."""
- return _merge(
- repo, mynode, orig, fcd, fco, fca, toolconf, files, labels, b'union'
- )
+ return _merge(repo, local, other, base, b'union')
@internaltool(
@@ -503,15 +525,13 @@
),
precheck=_mergecheck,
)
-def _imerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
+def _imerge(repo, mynode, local, other, base, toolconf, backup):
"""
Uses the internal non-interactive simple merge algorithm for merging
files. It will fail if there are any conflicts and leave markers in
the partially merged file. Markers will have two sections, one for each side
of merge."""
- return _merge(
- repo, mynode, orig, fcd, fco, fca, toolconf, files, labels, b'merge'
- )
+ return _merge(repo, local, other, base, b'merge')
@internaltool(
@@ -523,17 +543,13 @@
),
precheck=_mergecheck,
)
-def _imerge3(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
+def _imerge3(repo, mynode, local, other, base, toolconf, backup):
"""
Uses the internal non-interactive simple merge algorithm for merging
files. It will fail if there are any conflicts and leave markers in
the partially merged file. Marker will have three sections, one from each
side of the merge and one for the base content."""
- if not labels:
- labels = _defaultconflictlabels
- if len(labels) < 3:
- labels.append(b'base')
- return _imerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels)
+ return _merge(repo, local, other, base, b'merge3')
@internaltool(
@@ -564,62 +580,30 @@
),
precheck=_mergecheck,
)
-def _imerge_diff(
- repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None
-):
+def _imerge_diff(repo, mynode, local, other, base, toolconf, backup):
"""
Uses the internal non-interactive simple merge algorithm for merging
files. It will fail if there are any conflicts and leave markers in
the partially merged file. The marker will have two sections, one with the
content from one side of the merge, and one with a diff from the base
content to the content on the other side. (experimental)"""
- if not labels:
- labels = _defaultconflictlabels
- if len(labels) < 3:
- labels.append(b'base')
- return _merge(
- repo, mynode, orig, fcd, fco, fca, toolconf, files, labels, b'mergediff'
- )
-
-
-def _imergeauto(
- repo,
- mynode,
- orig,
- fcd,
- fco,
- fca,
- toolconf,
- files,
- labels=None,
- localorother=None,
-):
- """
- Generic driver for _imergelocal and _imergeother
- """
- assert localorother is not None
- r = simplemerge.simplemerge(
- repo.ui, fcd, fca, fco, label=labels, localorother=localorother
- )
- return True, r
+ return _merge(repo, local, other, base, b'mergediff')
@internaltool(b'merge-local', mergeonly, precheck=_mergecheck)
-def _imergelocal(*args, **kwargs):
+def _imergelocal(repo, mynode, local, other, base, toolconf, backup):
"""
Like :merge, but resolve all conflicts non-interactively in favor
of the local `p1()` changes."""
- success, status = _imergeauto(localorother=b'local', *args, **kwargs)
- return success, status, False
+ return _merge(repo, local, other, base, b'local')
@internaltool(b'merge-other', mergeonly, precheck=_mergecheck)
-def _imergeother(*args, **kwargs):
+def _imergeother(repo, mynode, local, other, base, toolconf, backup):
"""
Like :merge, but resolve all conflicts non-interactively in favor
of the other `p2()` changes."""
- success, status = _imergeauto(localorother=b'other', *args, **kwargs)
- return success, status, False
+ return _merge(repo, local, other, base, b'other')
@internaltool(
@@ -631,16 +615,16 @@
b"tool of your choice)\n"
),
)
-def _itagmerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
+def _itagmerge(repo, mynode, local, other, base, toolconf, backup):
"""
Uses the internal tag merge algorithm (experimental).
"""
- success, status = tagmerge.merge(repo, fcd, fco, fca)
+ success, status = tagmerge.merge(repo, local.fctx, other.fctx, base.fctx)
return success, status, False
@internaltool(b'dump', fullmerge, binary=True, symlink=True)
-def _idump(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
+def _idump(repo, mynode, local, other, base, toolconf, backup):
"""
Creates three versions of the files to merge, containing the
contents of local, other and base. These files can then be used to
@@ -652,33 +636,31 @@
This implies premerge. Therefore, files aren't dumped, if premerge
runs successfully. Use :forcedump to forcibly write files out.
"""
- a = _workingpath(repo, fcd)
- fd = fcd.path()
+ a = _workingpath(repo, local.fctx)
+ fd = local.fctx.path()
from . import context
- if isinstance(fcd, context.overlayworkingfilectx):
+ if isinstance(local.fctx, context.overlayworkingfilectx):
raise error.InMemoryMergeConflictsError(
b'in-memory merge does not support the :dump tool.'
)
- util.writefile(a + b".local", fcd.decodeddata())
- repo.wwrite(fd + b".other", fco.data(), fco.flags())
- repo.wwrite(fd + b".base", fca.data(), fca.flags())
+ util.writefile(a + b".local", local.fctx.decodeddata())
+ repo.wwrite(fd + b".other", other.fctx.data(), other.fctx.flags())
+ repo.wwrite(fd + b".base", base.fctx.data(), base.fctx.flags())
return False, 1, False
@internaltool(b'forcedump', mergeonly, binary=True, symlink=True)
-def _forcedump(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
+def _forcedump(repo, mynode, local, other, base, toolconf, backup):
"""
Creates three versions of the files as same as :dump, but omits premerge.
"""
- return _idump(
- repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=labels
- )
+ return _idump(repo, mynode, local, other, base, toolconf, backup)
-def _xmergeimm(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
+def _xmergeimm(repo, mynode, local, other, base, toolconf, backup):
# In-memory merge simply raises an exception on all external merge tools,
# for now.
#
@@ -746,7 +728,10 @@
ui.status(t.renderdefault(props))
-def _xmerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels):
+def _xmerge(repo, mynode, local, other, base, toolconf, backup):
+ fcd = local.fctx
+ fco = other.fctx
+ fca = base.fctx
tool, toolpath, binary, symlink, scriptfn = toolconf
uipathfn = scmutil.getuipathfn(repo)
if fcd.isabsent() or fco.isabsent():
@@ -755,20 +740,35 @@
% (tool, uipathfn(fcd.path()))
)
return False, 1, None
- unused, unused, unused, back = files
localpath = _workingpath(repo, fcd)
args = _toolstr(repo.ui, tool, b"args")
- with _maketempfiles(
- repo, fco, fca, repo.wvfs.join(back.path()), b"$output" in args
- ) as temppaths:
- basepath, otherpath, localoutputpath = temppaths
- outpath = b""
- mylabel, otherlabel = labels[:2]
- if len(labels) >= 3:
- baselabel = labels[2]
- else:
- baselabel = b'base'
+ files = [
+ (b"base", fca.path(), fca.decodeddata()),
+ (b"other", fco.path(), fco.decodeddata()),
+ ]
+ outpath = b""
+ if b"$output" in args:
+ # read input from backup, write to original
+ outpath = localpath
+ localoutputpath = backup.path()
+ # Remove the .orig to make syntax-highlighting more likely.
+ if localoutputpath.endswith(b'.orig'):
+ localoutputpath, ext = os.path.splitext(localoutputpath)
+ localdata = util.readfile(localpath)
+ files.append((b"local", localoutputpath, localdata))
+
+ with _maketempfiles(files) as temppaths:
+ basepath, otherpath = temppaths[:2]
+ if len(temppaths) == 3:
+ localpath = temppaths[2]
+
+ def format_label(input):
+ if input.label_detail:
+ return b'%s: %s' % (input.label, input.label_detail)
+ else:
+ return input.label
+
env = {
b'HG_FILE': fcd.path(),
b'HG_MY_NODE': short(mynode),
@@ -777,24 +777,20 @@
b'HG_MY_ISLINK': b'l' in fcd.flags(),
b'HG_OTHER_ISLINK': b'l' in fco.flags(),
b'HG_BASE_ISLINK': b'l' in fca.flags(),
- b'HG_MY_LABEL': mylabel,
- b'HG_OTHER_LABEL': otherlabel,
- b'HG_BASE_LABEL': baselabel,
+ b'HG_MY_LABEL': format_label(local),
+ b'HG_OTHER_LABEL': format_label(other),
+ b'HG_BASE_LABEL': format_label(base),
}
ui = repo.ui
- if b"$output" in args:
- # read input from backup, write to original
- outpath = localpath
- localpath = localoutputpath
replace = {
b'local': localpath,
b'base': basepath,
b'other': otherpath,
b'output': outpath,
- b'labellocal': mylabel,
- b'labelother': otherlabel,
- b'labelbase': baselabel,
+ b'labellocal': format_label(local),
+ b'labelother': format_label(other),
+ b'labelbase': format_label(base),
}
args = util.interpolate(
br'\$',
@@ -846,40 +842,19 @@
return True, r, False
-def _formatconflictmarker(ctx, template, label, pad):
- """Applies the given template to the ctx, prefixed by the label.
-
- Pad is the minimum width of the label prefix, so that multiple markers
- can have aligned templated parts.
- """
+def _populate_label_detail(input, template):
+ """Applies the given template to the ctx and stores it in the input."""
+ ctx = input.fctx.changectx()
if ctx.node() is None:
ctx = ctx.p1()
props = {b'ctx': ctx}
templateresult = template.renderdefault(props)
-
- label = (b'%s:' % label).ljust(pad + 1)
- mark = b'%s %s' % (label, templateresult)
-
- if mark:
- mark = mark.splitlines()[0] # split for safety
-
- # 8 for the prefix of conflict marker lines (e.g. '<<<<<<< ')
- return stringutil.ellipsis(mark, 80 - 8)
+ input.label_detail = templateresult.splitlines()[0] # split for safety
-_defaultconflictlabels = [b'local', b'other']
-
-
-def _formatlabels(repo, fcd, fco, fca, labels, tool=None):
- """Formats the given labels using the conflict marker template.
-
- Returns a list of formatted labels.
- """
- cd = fcd.changectx()
- co = fco.changectx()
- ca = fca.changectx()
-
+def _populate_label_details(repo, inputs, tool=None):
+ """Populates the label details using the conflict marker template."""
ui = repo.ui
template = ui.config(b'command-templates', b'mergemarker')
if tool is not None:
@@ -890,15 +865,8 @@
ui, template, defaults=templatekw.keywords, resources=tres
)
- pad = max(len(l) for l in labels)
-
- newlabels = [
- _formatconflictmarker(cd, tmpl, labels[0], pad),
- _formatconflictmarker(co, tmpl, labels[1], pad),
- ]
- if len(labels) > 2:
- newlabels.append(_formatconflictmarker(ca, tmpl, labels[2], pad))
- return newlabels
+ for input in inputs:
+ _populate_label_detail(input, tmpl)
def partextras(labels):
@@ -918,13 +886,7 @@
}
-def _restorebackup(fcd, back):
- # TODO: Add a workingfilectx.write(otherfilectx) path so we can use
- # util.copy here instead.
- fcd.write(back.data(), fcd.flags())
-
-
-def _makebackup(repo, ui, wctx, fcd, premerge):
+def _makebackup(repo, ui, fcd):
"""Makes and returns a filectx-like object for ``fcd``'s backup file.
In addition to preserving the user's pre-existing modifications to `fcd`
@@ -932,8 +894,8 @@
merge changed anything, and determine what line endings the new file should
have.
- Backups only need to be written once (right before the premerge) since their
- content doesn't change afterwards.
+ Backups only need to be written once since their content doesn't change
+ afterwards.
"""
if fcd.isabsent():
return None
@@ -941,96 +903,47 @@
# merge -> filemerge). (I suspect the fileset import is the weakest link)
from . import context
- back = scmutil.backuppath(ui, repo, fcd.path())
- inworkingdir = back.startswith(repo.wvfs.base) and not back.startswith(
- repo.vfs.base
- )
- if isinstance(fcd, context.overlayworkingfilectx) and inworkingdir:
- # If the backup file is to be in the working directory, and we're
- # merging in-memory, we must redirect the backup to the memory context
- # so we don't disturb the working directory.
- relpath = back[len(repo.wvfs.base) + 1 :]
- if premerge:
- wctx[relpath].write(fcd.data(), fcd.flags())
- return wctx[relpath]
+ if isinstance(fcd, context.overlayworkingfilectx):
+ # If we're merging in-memory, we're free to put the backup anywhere.
+ fd, backup = pycompat.mkstemp(b'hg-merge-backup')
+ with os.fdopen(fd, 'wb') as f:
+ f.write(fcd.data())
else:
- if premerge:
- # Otherwise, write to wherever path the user specified the backups
- # should go. We still need to switch based on whether the source is
- # in-memory so we can use the fast path of ``util.copy`` if both are
- # on disk.
- if isinstance(fcd, context.overlayworkingfilectx):
- util.writefile(back, fcd.data())
- else:
- a = _workingpath(repo, fcd)
- util.copyfile(a, back)
- # A arbitraryfilectx is returned, so we can run the same functions on
- # the backup context regardless of where it lives.
- return context.arbitraryfilectx(back, repo=repo)
+ backup = scmutil.backuppath(ui, repo, fcd.path())
+ a = _workingpath(repo, fcd)
+ util.copyfile(a, backup)
+
+ return context.arbitraryfilectx(backup, repo=repo)
@contextlib.contextmanager
-def _maketempfiles(repo, fco, fca, localpath, uselocalpath):
- """Writes out `fco` and `fca` as temporary files, and (if uselocalpath)
- copies `localpath` to another temporary file, so an external merge tool may
- use them.
+def _maketempfiles(files):
+ """Creates a temporary file for each (prefix, path, data) tuple in `files`,
+ so an external merge tool may use them.
"""
- tmproot = None
- tmprootprefix = repo.ui.config(b'experimental', b'mergetempdirprefix')
- if tmprootprefix:
- tmproot = pycompat.mkdtemp(prefix=tmprootprefix)
+ tmproot = pycompat.mkdtemp(prefix=b'hgmerge-')
- def maketempfrompath(prefix, path):
+ def maketempfrompath(prefix, path, data):
fullbase, ext = os.path.splitext(path)
pre = b"%s~%s" % (os.path.basename(fullbase), prefix)
- if tmproot:
- name = os.path.join(tmproot, pre)
- if ext:
- name += ext
- f = open(name, "wb")
- else:
- fd, name = pycompat.mkstemp(prefix=pre + b'.', suffix=ext)
- f = os.fdopen(fd, "wb")
- return f, name
-
- def tempfromcontext(prefix, ctx):
- f, name = maketempfrompath(prefix, ctx.path())
- data = repo.wwritedata(ctx.path(), ctx.data())
- f.write(data)
- f.close()
+ name = os.path.join(tmproot, pre)
+ if ext:
+ name += ext
+ util.writefile(name, data)
return name
- b = tempfromcontext(b"base", fca)
- c = tempfromcontext(b"other", fco)
- d = localpath
- if uselocalpath:
- # We start off with this being the backup filename, so remove the .orig
- # to make syntax-highlighting more likely.
- if d.endswith(b'.orig'):
- d, _ = os.path.splitext(d)
- f, d = maketempfrompath(b"local", d)
- with open(localpath, b'rb') as src:
- f.write(src.read())
- f.close()
-
+ temp_files = []
+ for prefix, path, data in files:
+ temp_files.append(maketempfrompath(prefix, path, data))
try:
- yield b, c, d
+ yield temp_files
finally:
- if tmproot:
- shutil.rmtree(tmproot)
- else:
- util.unlink(b)
- util.unlink(c)
- # if not uselocalpath, d is the 'orig'/backup file which we
- # shouldn't delete.
- if d and uselocalpath:
- util.unlink(d)
+ shutil.rmtree(tmproot)
-def _filemerge(premerge, repo, wctx, mynode, orig, fcd, fco, fca, labels=None):
+def filemerge(repo, wctx, mynode, orig, fcd, fco, fca, labels=None):
"""perform a 3-way merge in the working directory
- premerge = whether this is a premerge
mynode = parent node before merge
orig = original local filename before merge
fco = other file context
@@ -1039,10 +952,6 @@
Returns whether the merge is complete, the return value of the merge, and
a boolean indicating whether the file was deleted from disk."""
-
- if not fco.cmp(fcd): # files identical?
- return True, None, False
-
ui = repo.ui
fd = fcd.path()
uipathfn = scmutil.getuipathfn(repo)
@@ -1098,32 +1007,43 @@
toolconf = tool, toolpath, binary, symlink, scriptfn
+ if not labels:
+ labels = [b'local', b'other']
+ if len(labels) < 3:
+ labels.append(b'base')
+ local = simplemerge.MergeInput(fcd, labels[0])
+ other = simplemerge.MergeInput(fco, labels[1])
+ base = simplemerge.MergeInput(fca, labels[2])
if mergetype == nomerge:
- r, deleted = func(repo, mynode, orig, fcd, fco, fca, toolconf, labels)
- return True, r, deleted
+ return func(
+ repo,
+ mynode,
+ local,
+ other,
+ base,
+ toolconf,
+ )
- if premerge:
- if orig != fco.path():
- ui.status(
- _(b"merging %s and %s to %s\n")
- % (uipathfn(orig), uipathfn(fco.path()), fduipath)
- )
- else:
- ui.status(_(b"merging %s\n") % fduipath)
+ if orig != fco.path():
+ ui.status(
+ _(b"merging %s and %s to %s\n")
+ % (uipathfn(orig), uipathfn(fco.path()), fduipath)
+ )
+ else:
+ ui.status(_(b"merging %s\n") % fduipath)
ui.debug(b"my %s other %s ancestor %s\n" % (fcd, fco, fca))
- if precheck and not precheck(repo, mynode, orig, fcd, fco, fca, toolconf):
+ if precheck and not precheck(repo, mynode, fcd, fco, fca, toolconf):
if onfailure:
if wctx.isinmemory():
raise error.InMemoryMergeConflictsError(
b'in-memory merge does not support merge conflicts'
)
ui.warn(onfailure % fduipath)
- return True, 1, False
+ return 1, False
- back = _makebackup(repo, ui, wctx, fcd, premerge)
- files = (None, None, None, back)
+ backup = _makebackup(repo, ui, fcd)
r = 1
try:
internalmarkerstyle = ui.config(b'ui', b'mergemarkers')
@@ -1132,51 +1052,53 @@
else:
markerstyle = internalmarkerstyle
- if not labels:
- labels = _defaultconflictlabels
- formattedlabels = labels
- if markerstyle != b'basic':
- formattedlabels = _formatlabels(
- repo, fcd, fco, fca, labels, tool=tool
- )
-
- if premerge and mergetype == fullmerge:
+ if mergetype == fullmerge:
# conflict markers generated by premerge will use 'detailed'
# settings if either ui.mergemarkers or the tool's mergemarkers
# setting is 'detailed'. This way tools can have basic labels in
# space-constrained areas of the UI, but still get full information
# in conflict markers if premerge is 'keep' or 'keep-merge3'.
- premergelabels = labels
labeltool = None
if markerstyle != b'basic':
# respect 'tool's mergemarkertemplate (which defaults to
# command-templates.mergemarker)
labeltool = tool
if internalmarkerstyle != b'basic' or markerstyle != b'basic':
- premergelabels = _formatlabels(
- repo, fcd, fco, fca, premergelabels, tool=labeltool
+ _populate_label_details(
+ repo, [local, other, base], tool=labeltool
)
r = _premerge(
- repo, fcd, fco, fca, toolconf, files, labels=premergelabels
+ repo,
+ local,
+ other,
+ base,
+ toolconf,
)
- # complete if premerge successful (r is 0)
- return not r, r, False
+ # we're done if premerge was successful (r is 0)
+ if not r:
+ return r, False
+
+ # Reset to basic labels
+ local.label_detail = None
+ other.label_detail = None
+ base.label_detail = None
+
+ if markerstyle != b'basic':
+ _populate_label_details(repo, [local, other, base], tool=tool)
needcheck, r, deleted = func(
repo,
mynode,
- orig,
- fcd,
- fco,
- fca,
+ local,
+ other,
+ base,
toolconf,
- files,
- labels=formattedlabels,
+ backup,
)
if needcheck:
- r = _check(repo, r, ui, tool, fcd, files)
+ r = _check(repo, r, ui, tool, fcd, backup)
if r:
if onfailure:
@@ -1189,10 +1111,10 @@
ui.warn(onfailure % fduipath)
_onfilemergefailure(ui)
- return True, r, deleted
+ return r, deleted
finally:
- if not r and back is not None:
- back.remove()
+ if not r and backup is not None:
+ backup.remove()
def _haltmerge():
@@ -1225,10 +1147,9 @@
)
-def _check(repo, r, ui, tool, fcd, files):
+def _check(repo, r, ui, tool, fcd, backup):
fd = fcd.path()
uipathfn = scmutil.getuipathfn(repo)
- unused, unused, unused, back = files
if not r and (
_toolbool(ui, tool, b"checkconflicts")
@@ -1255,7 +1176,7 @@
or b'changed' in _toollist(ui, tool, b"check")
)
):
- if back is not None and not fcd.cmp(back):
+ if backup is not None and not fcd.cmp(backup):
if ui.promptchoice(
_(
b" output file %s appears unchanged\n"
@@ -1267,8 +1188,8 @@
):
r = 1
- if back is not None and _toolbool(ui, tool, b"fixeol"):
- _matcheol(_workingpath(repo, fcd), back)
+ if backup is not None and _toolbool(ui, tool, b"fixeol"):
+ _matcheol(_workingpath(repo, fcd), backup)
return r
@@ -1277,18 +1198,6 @@
return repo.wjoin(ctx.path())
-def premerge(repo, wctx, mynode, orig, fcd, fco, fca, labels=None):
- return _filemerge(
- True, repo, wctx, mynode, orig, fcd, fco, fca, labels=labels
- )
-
-
-def filemerge(repo, wctx, mynode, orig, fcd, fco, fca, labels=None):
- return _filemerge(
- False, repo, wctx, mynode, orig, fcd, fco, fca, labels=labels
- )
-
-
def loadinternalmerge(ui, extname, registrarobj):
"""Load internal merge tool from specified registrarobj"""
for name, func in pycompat.iteritems(registrarobj._table):
--- a/mercurial/helptext/config.txt Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/helptext/config.txt Fri Feb 18 14:27:43 2022 +0100
@@ -513,13 +513,18 @@
``update.check``
Determines what level of checking :hg:`update` will perform before moving
to a destination revision. Valid values are ``abort``, ``none``,
- ``linear``, and ``noconflict``. ``abort`` always fails if the working
- directory has uncommitted changes. ``none`` performs no checking, and may
- result in a merge with uncommitted changes. ``linear`` allows any update
- as long as it follows a straight line in the revision history, and may
- trigger a merge with uncommitted changes. ``noconflict`` will allow any
- update which would not trigger a merge with uncommitted changes, if any
- are present.
+ ``linear``, and ``noconflict``.
+
+ - ``abort`` always fails if the working directory has uncommitted changes.
+
+ - ``none`` performs no checking, and may result in a merge with uncommitted changes.
+
+ - ``linear`` allows any update as long as it follows a straight line in the
+ revision history, and may trigger a merge with uncommitted changes.
+
+ - ``noconflict`` will allow any update which would not trigger a merge with
+ uncommitted changes, if any are present.
+
(default: ``linear``)
``update.requiredest``
@@ -850,6 +855,24 @@
# (this extension will get loaded from the file specified)
myfeature = ~/.hgext/myfeature.py
+If an extension fails to load, a warning will be issued, and Mercurial will
+proceed. To enforce that an extension must be loaded, one can set the `required`
+suboption in the config::
+
+ [extensions]
+ myfeature = ~/.hgext/myfeature.py
+ myfeature:required = yes
+
+To debug extension loading issues, one can add `--traceback` to their mercurial
+invocation.
+
+A default setting can be set using the special `*` extension key::
+
+ [extensions]
+ *:required = yes
+ myfeature = ~/.hgext/myfeature.py
+ rebase=
+
``format``
----------
@@ -921,6 +944,38 @@
For a more comprehensive guide, see :hg:`help internals.dirstate-v2`.
+``use-dirstate-tracked-hint``
+    Enable or disable the writing of a "tracked-hint" file alongside the
+    dirstate. (defaults to disabled)
+
+ That "tracked-hint" can help external automations to detect changes to the
+    set of tracked files. (i.e. the result of `hg files` or `hg status -macd`)
+
+    The tracked-hint is written in a new `.hg/dirstate-tracked-hint` file. That
+ contains two lines:
+ - the first line is the file version (currently: 1),
+ - the second line contains the "tracked-hint".
+ That file is written right after the dirstate is written.
+
+    The tracked-hint changes whenever the set of files tracked in the dirstate
+ changes. The general idea is:
+    - if the hint is identical, the set of tracked files SHOULD be identical,
+    - if the hint is different, the set of tracked files MIGHT be different.
+
+ The "hint is identical" case uses `SHOULD` as the dirstate and the hint file
+ are two distinct files and therefore that cannot be read or written to in an
+ atomic way. If the key is identical, nothing garantees that the dirstate is
+    dirstate is not updated right after the hint file. This is considered a
+    negligible limitation for the intended use case. It is actually possible
+    to prevent this race by taking the repository lock during read operations.
+
+    There are two "ways" to use this feature:
+
+ 1) monitoring changes to the `.hg/dirstate-tracked-hint`, if the file
+ changes, the tracked set might have changed.
+
+ 2) storing the value and comparing it to a later value.
+
``use-persistent-nodemap``
Enable or disable the "persistent-nodemap" feature which improves
performance if the Rust extensions are available.
@@ -975,7 +1030,7 @@
Introduced in Mercurial 5.7.
- Disabled by default.
+ Enabled by default in Mercurial 6.1.
``usestore``
Enable or disable the "store" repository format which improves
--- a/mercurial/helptext/internals/wireprotocol.txt Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/helptext/internals/wireprotocol.txt Fri Feb 18 14:27:43 2022 +0100
@@ -332,95 +332,6 @@
after responses. In other words, the length of the response contains the
trailing ``\n``.
-Clients supporting version 2 of the SSH transport send a line beginning
-with ``upgrade`` before the ``hello`` and ``between`` commands. The line
-(which isn't a well-formed command line because it doesn't consist of a
-single command name) serves to both communicate the client's intent to
-switch to transport version 2 (transports are version 1 by default) as
-well as to advertise the client's transport-level capabilities so the
-server may satisfy that request immediately.
-
-The upgrade line has the form:
-
- upgrade <token> <transport capabilities>
-
-That is the literal string ``upgrade`` followed by a space, followed by
-a randomly generated string, followed by a space, followed by a string
-denoting the client's transport capabilities.
-
-The token can be anything. However, a random UUID is recommended. (Use
-of version 4 UUIDs is recommended because version 1 UUIDs can leak the
-client's MAC address.)
-
-The transport capabilities string is a URL/percent encoded string
-containing key-value pairs defining the client's transport-level
-capabilities. The following capabilities are defined:
-
-proto
- A comma-delimited list of transport protocol versions the client
- supports. e.g. ``ssh-v2``.
-
-If the server does not recognize the ``upgrade`` line, it should issue
-an empty response and continue processing the ``hello`` and ``between``
-commands. Here is an example handshake between a version 2 aware client
-and a non version 2 aware server:
-
- c: upgrade 2e82ab3f-9ce3-4b4e-8f8c-6fd1c0e9e23a proto=ssh-v2
- c: hello\n
- c: between\n
- c: pairs 81\n
- c: 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
- s: 0\n
- s: 324\n
- s: capabilities: lookup changegroupsubset branchmap pushkey known getbundle ...\n
- s: 1\n
- s: \n
-
-(The initial ``0\n`` line from the server indicates an empty response to
-the unknown ``upgrade ..`` command/line.)
-
-If the server recognizes the ``upgrade`` line and is willing to satisfy that
-upgrade request, it replies to with a payload of the following form:
-
- upgraded <token> <transport name>\n
-
-This line is the literal string ``upgraded``, a space, the token that was
-specified by the client in its ``upgrade ...`` request line, a space, and the
-name of the transport protocol that was chosen by the server. The transport
-name MUST match one of the names the client specified in the ``proto`` field
-of its ``upgrade ...`` request line.
-
-If a server issues an ``upgraded`` response, it MUST also read and ignore
-the lines associated with the ``hello`` and ``between`` command requests
-that were issued by the server. It is assumed that the negotiated transport
-will respond with equivalent requested information following the transport
-handshake.
-
-All data following the ``\n`` terminating the ``upgraded`` line is the
-domain of the negotiated transport. It is common for the data immediately
-following to contain additional metadata about the state of the transport and
-the server. However, this isn't strictly speaking part of the transport
-handshake and isn't covered by this section.
-
-Here is an example handshake between a version 2 aware client and a version
-2 aware server:
-
- c: upgrade 2e82ab3f-9ce3-4b4e-8f8c-6fd1c0e9e23a proto=ssh-v2
- c: hello\n
- c: between\n
- c: pairs 81\n
- c: 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
- s: upgraded 2e82ab3f-9ce3-4b4e-8f8c-6fd1c0e9e23a ssh-v2\n
- s: <additional transport specific data>
-
-The client-issued token that is echoed in the response provides a more
-resilient mechanism for differentiating *banner* output from Mercurial
-output. In version 1, properly formatted banner output could get confused
-for Mercurial server output. By submitting a randomly generated token
-that is then present in the response, the client can look for that token
-in response lines and have reasonable certainty that the line did not
-originate from a *banner* message.
-
SSH Version 1 Transport
-----------------------
@@ -488,31 +399,6 @@
should issue a ``protocaps`` command after the initial handshake to annonunce
its own capabilities. The client capabilities are persistent.
-SSH Version 2 Transport
------------------------
-
-**Experimental and under development**
-
-Version 2 of the SSH transport behaves identically to version 1 of the SSH
-transport with the exception of handshake semantics. See above for how
-version 2 of the SSH transport is negotiated.
-
-Immediately following the ``upgraded`` line signaling a switch to version
-2 of the SSH protocol, the server automatically sends additional details
-about the capabilities of the remote server. This has the form:
-
- <integer length of value>\n
- capabilities: ...\n
-
-e.g.
-
- s: upgraded 2e82ab3f-9ce3-4b4e-8f8c-6fd1c0e9e23a ssh-v2\n
- s: 240\n
- s: capabilities: known getbundle batch ...\n
-
-Following capabilities advertisement, the peers communicate using version
-1 of the SSH transport.
-
Capabilities
============
--- a/mercurial/helptext/patterns.txt Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/helptext/patterns.txt Fri Feb 18 14:27:43 2022 +0100
@@ -1,8 +1,10 @@
Mercurial accepts several notations for identifying one or more files
at a time.
-By default, Mercurial treats filenames as shell-style extended glob
-patterns.
+By default, Mercurial treats filenames verbatim without pattern
+matching, relative to the current working directory. Note that your
+system shell might perform pattern matching of its own before passing
+filenames into Mercurial.
Alternate pattern notations must be specified explicitly.
--- a/mercurial/hg.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/hg.py Fri Feb 18 14:27:43 2022 +0100
@@ -132,13 +132,6 @@
return revs, revs[0]
-def parseurl(path, branches=None):
- '''parse url#branch, returning (url, (branch, branches))'''
- msg = b'parseurl(...) moved to mercurial.utils.urlutil'
- util.nouideprecwarn(msg, b'6.0', stacklevel=2)
- return urlutil.parseurl(path, branches=branches)
-
-
schemes = {
b'bundle': bundlerepo,
b'union': unionrepo,
--- a/mercurial/hgweb/hgweb_mod.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/hgweb/hgweb_mod.py Fri Feb 18 14:27:43 2022 +0100
@@ -366,17 +366,6 @@
# replace it.
res.headers[b'Content-Security-Policy'] = rctx.csp
- # /api/* is reserved for various API implementations. Dispatch
- # accordingly. But URL paths can conflict with subrepos and virtual
- # repos in hgwebdir. So until we have a workaround for this, only
- # expose the URLs if the feature is enabled.
- apienabled = rctx.repo.ui.configbool(b'experimental', b'web.apiserver')
- if apienabled and req.dispatchparts and req.dispatchparts[0] == b'api':
- wireprotoserver.handlewsgiapirequest(
- rctx, req, res, self.check_perm
- )
- return res.sendresponse()
-
handled = wireprotoserver.handlewsgirequest(
rctx, req, res, self.check_perm
)
--- a/mercurial/hgweb/webcommands.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/hgweb/webcommands.py Fri Feb 18 14:27:43 2022 +0100
@@ -519,6 +519,7 @@
def decodepath(path):
+ # type: (bytes) -> bytes
"""Hook for mapping a path in the repository to a path in the
working copy.
@@ -616,7 +617,9 @@
yield {
b"parity": next(parity),
b"path": path,
+ # pytype: disable=wrong-arg-types
b"emptydirs": b"/".join(emptydirs),
+ # pytype: enable=wrong-arg-types
b"basename": d,
}
--- a/mercurial/httppeer.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/httppeer.py Fri Feb 18 14:27:43 2022 +0100
@@ -13,7 +13,6 @@
import os
import socket
import struct
-import weakref
from .i18n import _
from .pycompat import getattr
@@ -25,21 +24,9 @@
statichttprepo,
url as urlmod,
util,
- wireprotoframing,
- wireprototypes,
wireprotov1peer,
- wireprotov2peer,
- wireprotov2server,
)
-from .interfaces import (
- repository,
- util as interfaceutil,
-)
-from .utils import (
- cborutil,
- stringutil,
- urlutil,
-)
+from .utils import urlutil
httplib = util.httplib
urlerr = util.urlerr
@@ -331,9 +318,7 @@
self.respurl = respurl
-def parsev1commandresponse(
- ui, baseurl, requrl, qs, resp, compressible, allowcbor=False
-):
+def parsev1commandresponse(ui, baseurl, requrl, qs, resp, compressible):
# record the url we got redirected to
redirected = False
respurl = pycompat.bytesurl(resp.geturl())
@@ -376,17 +361,6 @@
try:
subtype = proto.split(b'-', 1)[1]
- # Unless we end up supporting CBOR in the legacy wire protocol,
- # this should ONLY be encountered for the initial capabilities
- # request during handshake.
- if subtype == b'cbor':
- if allowcbor:
- return respurl, proto, resp
- else:
- raise error.RepoError(
- _(b'unexpected CBOR response from server')
- )
-
version_info = tuple([int(n) for n in subtype.split(b'.')])
except ValueError:
raise error.RepoError(
@@ -564,85 +538,6 @@
raise exception
-def sendv2request(
- ui, opener, requestbuilder, apiurl, permission, requests, redirect
-):
- wireprotoframing.populatestreamencoders()
-
- uiencoders = ui.configlist(b'experimental', b'httppeer.v2-encoder-order')
-
- if uiencoders:
- encoders = []
-
- for encoder in uiencoders:
- if encoder not in wireprotoframing.STREAM_ENCODERS:
- ui.warn(
- _(
- b'wire protocol version 2 encoder referenced in '
- b'config (%s) is not known; ignoring\n'
- )
- % encoder
- )
- else:
- encoders.append(encoder)
-
- else:
- encoders = wireprotoframing.STREAM_ENCODERS_ORDER
-
- reactor = wireprotoframing.clientreactor(
- ui,
- hasmultiplesend=False,
- buffersends=True,
- clientcontentencoders=encoders,
- )
-
- handler = wireprotov2peer.clienthandler(
- ui, reactor, opener=opener, requestbuilder=requestbuilder
- )
-
- url = b'%s/%s' % (apiurl, permission)
-
- if len(requests) > 1:
- url += b'/multirequest'
- else:
- url += b'/%s' % requests[0][0]
-
- ui.debug(b'sending %d commands\n' % len(requests))
- for command, args, f in requests:
- ui.debug(
- b'sending command %s: %s\n'
- % (command, stringutil.pprint(args, indent=2))
- )
- assert not list(
- handler.callcommand(command, args, f, redirect=redirect)
- )
-
- # TODO stream this.
- body = b''.join(map(bytes, handler.flushcommands()))
-
- # TODO modify user-agent to reflect v2
- headers = {
- 'Accept': wireprotov2server.FRAMINGTYPE,
- 'Content-Type': wireprotov2server.FRAMINGTYPE,
- }
-
- req = requestbuilder(pycompat.strurl(url), body, headers)
- req.add_unredirected_header('Content-Length', '%d' % len(body))
-
- try:
- res = opener.open(req)
- except urlerr.httperror as e:
- if e.code == 401:
- raise error.Abort(_(b'authorization failed'))
-
- raise
- except httplib.HTTPException as e:
- ui.traceback()
- raise IOError(None, e)
-
- return handler, res
-
-
class queuedcommandfuture(pycompat.futures.Future):
"""Wraps result() on command futures to trigger submission on call."""
@@ -657,302 +552,6 @@
return self.result(timeout)
-@interfaceutil.implementer(repository.ipeercommandexecutor)
-class httpv2executor(object):
- def __init__(
- self, ui, opener, requestbuilder, apiurl, descriptor, redirect
- ):
- self._ui = ui
- self._opener = opener
- self._requestbuilder = requestbuilder
- self._apiurl = apiurl
- self._descriptor = descriptor
- self._redirect = redirect
- self._sent = False
- self._closed = False
- self._neededpermissions = set()
- self._calls = []
- self._futures = weakref.WeakSet()
- self._responseexecutor = None
- self._responsef = None
-
- def __enter__(self):
- return self
-
- def __exit__(self, exctype, excvalue, exctb):
- self.close()
-
- def callcommand(self, command, args):
- if self._sent:
- raise error.ProgrammingError(
- b'callcommand() cannot be used after commands are sent'
- )
-
- if self._closed:
- raise error.ProgrammingError(
- b'callcommand() cannot be used after close()'
- )
-
- # The service advertises which commands are available. So if we attempt
- # to call an unknown command or pass an unknown argument, we can screen
- # for this.
- if command not in self._descriptor[b'commands']:
- raise error.ProgrammingError(
- b'wire protocol command %s is not available' % command
- )
-
- cmdinfo = self._descriptor[b'commands'][command]
- unknownargs = set(args.keys()) - set(cmdinfo.get(b'args', {}))
-
- if unknownargs:
- raise error.ProgrammingError(
- b'wire protocol command %s does not accept argument: %s'
- % (command, b', '.join(sorted(unknownargs)))
- )
-
- self._neededpermissions |= set(cmdinfo[b'permissions'])
-
- # TODO we /could/ also validate types here, since the API descriptor
- # includes types...
-
- f = pycompat.futures.Future()
-
- # Monkeypatch it so result() triggers sendcommands(), otherwise result()
- # could deadlock.
- f.__class__ = queuedcommandfuture
- f._peerexecutor = self
-
- self._futures.add(f)
- self._calls.append((command, args, f))
-
- return f
-
- def sendcommands(self):
- if self._sent:
- return
-
- if not self._calls:
- return
-
- self._sent = True
-
- # Unhack any future types so caller sees a clean type and so we
- # break reference cycle.
- for f in self._futures:
- if isinstance(f, queuedcommandfuture):
- f.__class__ = pycompat.futures.Future
- f._peerexecutor = None
-
- # Mark the future as running and filter out cancelled futures.
- calls = [
- (command, args, f)
- for command, args, f in self._calls
- if f.set_running_or_notify_cancel()
- ]
-
- # Clear out references, prevent improper object usage.
- self._calls = None
-
- if not calls:
- return
-
- permissions = set(self._neededpermissions)
-
- if b'push' in permissions and b'pull' in permissions:
- permissions.remove(b'pull')
-
- if len(permissions) > 1:
- raise error.RepoError(
- _(b'cannot make request requiring multiple permissions: %s')
- % _(b', ').join(sorted(permissions))
- )
-
- permission = {
- b'push': b'rw',
- b'pull': b'ro',
- }[permissions.pop()]
-
- handler, resp = sendv2request(
- self._ui,
- self._opener,
- self._requestbuilder,
- self._apiurl,
- permission,
- calls,
- self._redirect,
- )
-
- # TODO we probably want to validate the HTTP code, media type, etc.
-
- self._responseexecutor = pycompat.futures.ThreadPoolExecutor(1)
- self._responsef = self._responseexecutor.submit(
- self._handleresponse, handler, resp
- )
-
- def close(self):
- if self._closed:
- return
-
- self.sendcommands()
-
- self._closed = True
-
- if not self._responsef:
- return
-
- # TODO ^C here may not result in immediate program termination.
-
- try:
- self._responsef.result()
- finally:
- self._responseexecutor.shutdown(wait=True)
- self._responsef = None
- self._responseexecutor = None
-
- # If any of our futures are still in progress, mark them as
- # errored, otherwise a result() could wait indefinitely.
- for f in self._futures:
- if not f.done():
- f.set_exception(
- error.ResponseError(_(b'unfulfilled command response'))
- )
-
- self._futures = None
-
- def _handleresponse(self, handler, resp):
- # Called in a thread to read the response.
-
- while handler.readdata(resp):
- pass
-
-
-@interfaceutil.implementer(repository.ipeerv2)
-class httpv2peer(object):
-
- limitedarguments = False
-
- def __init__(
- self, ui, repourl, apipath, opener, requestbuilder, apidescriptor
- ):
- self.ui = ui
- self.apidescriptor = apidescriptor
-
- if repourl.endswith(b'/'):
- repourl = repourl[:-1]
-
- self._url = repourl
- self._apipath = apipath
- self._apiurl = b'%s/%s' % (repourl, apipath)
- self._opener = opener
- self._requestbuilder = requestbuilder
-
- self._redirect = wireprotov2peer.supportedredirects(ui, apidescriptor)
-
- # Start of ipeerconnection.
-
- def url(self):
- return self._url
-
- def local(self):
- return None
-
- def peer(self):
- return self
-
- def canpush(self):
- # TODO change once implemented.
- return False
-
- def close(self):
- self.ui.note(
- _(
- b'(sent %d HTTP requests and %d bytes; '
- b'received %d bytes in responses)\n'
- )
- % (
- self._opener.requestscount,
- self._opener.sentbytescount,
- self._opener.receivedbytescount,
- )
- )
-
- # End of ipeerconnection.
-
- # Start of ipeercapabilities.
-
- def capable(self, name):
- # The capabilities used internally historically map to capabilities
- # advertised from the "capabilities" wire protocol command. However,
- # version 2 of that command works differently.
-
- # Maps to commands that are available.
- if name in (
- b'branchmap',
- b'getbundle',
- b'known',
- b'lookup',
- b'pushkey',
- ):
- return True
-
- # Other concepts.
- if name in (b'bundle2',):
- return True
-
- # Alias command-* to presence of command of that name.
- if name.startswith(b'command-'):
- return name[len(b'command-') :] in self.apidescriptor[b'commands']
-
- return False
-
- def requirecap(self, name, purpose):
- if self.capable(name):
- return
-
- raise error.CapabilityError(
- _(
- b'cannot %s; client or remote repository does not support the '
- b'\'%s\' capability'
- )
- % (purpose, name)
- )
-
- # End of ipeercapabilities.
-
- def _call(self, name, **args):
- with self.commandexecutor() as e:
- return e.callcommand(name, args).result()
-
- def commandexecutor(self):
- return httpv2executor(
- self.ui,
- self._opener,
- self._requestbuilder,
- self._apiurl,
- self.apidescriptor,
- self._redirect,
- )
-
-
-# Registry of API service names to metadata about peers that handle it.
-#
-# The following keys are meaningful:
-#
-# init
-# Callable receiving (ui, repourl, servicepath, opener, requestbuilder,
-# apidescriptor) to create a peer.
-#
-# priority
-# Integer priority for the service. If we could choose from multiple
-# services, we choose the one with the highest priority.
-API_PEERS = {
- wireprototypes.HTTP_WIREPROTO_V2: {
- b'init': httpv2peer,
- b'priority': 50,
- },
-}
-
-
def performhandshake(ui, url, opener, requestbuilder):
# The handshake is a request to the capabilities command.
@@ -963,28 +562,6 @@
args = {}
- # The client advertises support for newer protocols by adding an
- # X-HgUpgrade-* header with a list of supported APIs and an
- # X-HgProto-* header advertising which serializing formats it supports.
- # We only support the HTTP version 2 transport and CBOR responses for
- # now.
- advertisev2 = ui.configbool(b'experimental', b'httppeer.advertise-v2')
-
- if advertisev2:
- args[b'headers'] = {
- 'X-HgProto-1': 'cbor',
- }
-
- args[b'headers'].update(
- encodevalueinheaders(
- b' '.join(sorted(API_PEERS)),
- b'X-HgUpgrade',
- # We don't know the header limit this early.
- # So make it small.
- 1024,
- )
- )
-
req, requrl, qs = makev1commandrequest(
ui, requestbuilder, caps, capable, url, b'capabilities', args
)
@@ -1004,7 +581,7 @@
# redirect that drops the query string to "just work."
try:
respurl, ct, resp = parsev1commandresponse(
- ui, url, requrl, qs, resp, compressible=False, allowcbor=advertisev2
+ ui, url, requrl, qs, resp, compressible=False
)
except RedirectedRepoError as e:
req, requrl, qs = makev1commandrequest(
@@ -1012,7 +589,7 @@
)
resp = sendrequest(ui, opener, req)
respurl, ct, resp = parsev1commandresponse(
- ui, url, requrl, qs, resp, compressible=False, allowcbor=advertisev2
+ ui, url, requrl, qs, resp, compressible=False
)
try:
@@ -1023,29 +600,7 @@
if not ct.startswith(b'application/mercurial-'):
raise error.ProgrammingError(b'unexpected content-type: %s' % ct)
- if advertisev2:
- if ct == b'application/mercurial-cbor':
- try:
- info = cborutil.decodeall(rawdata)[0]
- except cborutil.CBORDecodeError:
- raise error.Abort(
- _(b'error decoding CBOR from remote server'),
- hint=_(
- b'try again and consider contacting '
- b'the server operator'
- ),
- )
-
- # We got a legacy response. That's fine.
- elif ct in (b'application/mercurial-0.1', b'application/mercurial-0.2'):
- info = {b'v1capabilities': set(rawdata.split())}
-
- else:
- raise error.RepoError(
- _(b'unexpected response type from server: %s') % ct
- )
- else:
- info = {b'v1capabilities': set(rawdata.split())}
+ info = {b'v1capabilities': set(rawdata.split())}
return respurl, info
@@ -1073,29 +628,6 @@
respurl, info = performhandshake(ui, url, opener, requestbuilder)
- # Given the intersection of APIs that both we and the server support,
- # sort by their advertised priority and pick the first one.
- #
- # TODO consider making this request-based and interface driven. For
- # example, the caller could say "I want a peer that does X." It's quite
- # possible that not all peers would do that. Since we know the service
- # capabilities, we could filter out services not meeting the
- # requirements. Possibly by consulting the interfaces defined by the
- # peer type.
- apipeerchoices = set(info.get(b'apis', {}).keys()) & set(API_PEERS.keys())
-
- preferredchoices = sorted(
- apipeerchoices, key=lambda x: API_PEERS[x][b'priority'], reverse=True
- )
-
- for service in preferredchoices:
- apipath = b'%s/%s' % (info[b'apibase'].rstrip(b'/'), service)
-
- return API_PEERS[service][b'init'](
- ui, respurl, apipath, opener, requestbuilder, info[b'apis'][service]
- )
-
- # Failed to construct an API peer. Fall back to legacy.
return httppeer(
ui, path, respurl, opener, requestbuilder, info[b'v1capabilities']
)
--- a/mercurial/interfaces/dirstate.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/interfaces/dirstate.py Fri Feb 18 14:27:43 2022 +0100
@@ -66,17 +66,6 @@
def pathto(f, cwd=None):
pass
- def __getitem__(key):
- """Return the current state of key (a filename) in the dirstate.
-
- States are:
- n normal
- m needs merging
- r marked for removal
- a marked for addition
- ? not tracked
- """
-
def __contains__(key):
"""Check if bytestring `key` is known to the dirstate."""
--- a/mercurial/interfaces/repository.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/interfaces/repository.py Fri Feb 18 14:27:43 2022 +0100
@@ -1278,7 +1278,7 @@
def linkrev(rev):
"""Obtain the changeset revision number a revision is linked to."""
- def revision(node, _df=None, raw=False):
+ def revision(node, _df=None):
"""Obtain fulltext data for a node."""
def rawdata(node, _df=None):
@@ -1495,13 +1495,6 @@
"""null revision for the hash function used by the repository."""
)
- supportedformats = interfaceutil.Attribute(
- """Set of requirements that apply to stream clone.
-
- This is actually a class attribute and is shared among all instances.
- """
- )
-
supported = interfaceutil.Attribute(
"""Set of requirements that this repo is capable of opening."""
)
@@ -1794,7 +1787,7 @@
DANGEROUS.
"""
- def updatecaches(tr=None, full=False):
+ def updatecaches(tr=None, full=False, caches=None):
"""Warm repo caches."""
def invalidatecaches():
--- a/mercurial/localrepo.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/localrepo.py Fri Feb 18 14:27:43 2022 +0100
@@ -1,4 +1,5 @@
# localrepo.py - read/write repository class for mercurial
+# coding: utf-8
#
# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
@@ -931,7 +932,7 @@
if engine.available() and engine.revlogheader():
supported.add(b'exp-compression-%s' % name)
if engine.name() == b'zstd':
- supported.add(b'revlog-compression-zstd')
+ supported.add(requirementsmod.REVLOG_COMPRESSION_ZSTD)
return supported
@@ -1273,32 +1274,26 @@
used.
"""
- # obsolete experimental requirements:
- # - manifestv2: An experimental new manifest format that allowed
- # for stem compression of long paths. Experiment ended up not
- # being successful (repository sizes went up due to worse delta
- # chains), and the code was deleted in 4.6.
- supportedformats = {
+ _basesupported = {
+ requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
+ requirementsmod.CHANGELOGV2_REQUIREMENT,
+ requirementsmod.COPIESSDC_REQUIREMENT,
+ requirementsmod.DIRSTATE_TRACKED_HINT_V1,
+ requirementsmod.DIRSTATE_V2_REQUIREMENT,
+ requirementsmod.DOTENCODE_REQUIREMENT,
+ requirementsmod.FNCACHE_REQUIREMENT,
+ requirementsmod.GENERALDELTA_REQUIREMENT,
+ requirementsmod.INTERNAL_PHASE_REQUIREMENT,
+ requirementsmod.NODEMAP_REQUIREMENT,
+ requirementsmod.RELATIVE_SHARED_REQUIREMENT,
requirementsmod.REVLOGV1_REQUIREMENT,
- requirementsmod.GENERALDELTA_REQUIREMENT,
- requirementsmod.TREEMANIFEST_REQUIREMENT,
- requirementsmod.COPIESSDC_REQUIREMENT,
requirementsmod.REVLOGV2_REQUIREMENT,
- requirementsmod.CHANGELOGV2_REQUIREMENT,
+ requirementsmod.SHARED_REQUIREMENT,
+ requirementsmod.SHARESAFE_REQUIREMENT,
+ requirementsmod.SPARSE_REQUIREMENT,
requirementsmod.SPARSEREVLOG_REQUIREMENT,
- requirementsmod.NODEMAP_REQUIREMENT,
- bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
- requirementsmod.SHARESAFE_REQUIREMENT,
- requirementsmod.DIRSTATE_V2_REQUIREMENT,
- }
- _basesupported = supportedformats | {
requirementsmod.STORE_REQUIREMENT,
- requirementsmod.FNCACHE_REQUIREMENT,
- requirementsmod.SHARED_REQUIREMENT,
- requirementsmod.RELATIVE_SHARED_REQUIREMENT,
- requirementsmod.DOTENCODE_REQUIREMENT,
- requirementsmod.SPARSE_REQUIREMENT,
- requirementsmod.INTERNAL_PHASE_REQUIREMENT,
+ requirementsmod.TREEMANIFEST_REQUIREMENT,
}
# list of prefix for file which can be written without 'wlock'
@@ -1748,7 +1743,9 @@
"""Extension point for wrapping the dirstate per-repo."""
sparsematchfn = lambda: sparse.matcher(self)
v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
+ th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
use_dirstate_v2 = v2_req in self.requirements
+ use_tracked_hint = th in self.requirements
return dirstate.dirstate(
self.vfs,
@@ -1758,6 +1755,7 @@
sparsematchfn,
self.nodeconstants,
use_dirstate_v2,
+ use_tracked_hint=use_tracked_hint,
)
def _dirstatevalidate(self, node):
@@ -3551,6 +3549,10 @@
depends on the configuration
"""
target_requirements = set()
+ if not srcrepo.requirements:
+ # this is a legacy revlog "v0" repository, we cannot do anything fancy
+ # with it.
+ return target_requirements
createopts = defaultcreateopts(ui, createopts=createopts)
for r in newreporequirements(ui, createopts):
if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
@@ -3568,16 +3570,6 @@
Extensions can wrap this function to specify custom requirements for
new repositories.
"""
- # If the repo is being created from a shared repository, we copy
- # its requirements.
- if b'sharedrepo' in createopts:
- requirements = set(createopts[b'sharedrepo'].requirements)
- if createopts.get(b'sharedrelative'):
- requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
- else:
- requirements.add(requirementsmod.SHARED_REQUIREMENT)
-
- return requirements
if b'backend' not in createopts:
raise error.ProgrammingError(
@@ -3663,7 +3655,7 @@
requirements.add(b'lfs')
if ui.configbool(b'format', b'bookmarks-in-store'):
- requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
+ requirements.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
if ui.configbool(b'format', b'use-persistent-nodemap'):
requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
@@ -3673,6 +3665,45 @@
if ui.configbool(b'format', b'use-share-safe'):
requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
+ # if we are creating a share-repo¹ we have to handle requirement
+ # differently.
+ #
+ # [1] (i.e. reusing the store from another repository, just having a
+ # working copy)
+ if b'sharedrepo' in createopts:
+ source_requirements = set(createopts[b'sharedrepo'].requirements)
+
+ if requirementsmod.SHARESAFE_REQUIREMENT not in source_requirements:
+ # share to an old school repository, we have to copy the
+ # requirements and hope for the best.
+ requirements = source_requirements
+ else:
+ # We have control on the working copy only, so "copy" the non
+ # working copy part over, ignoring previous logic.
+ to_drop = set()
+ for req in requirements:
+ if req in requirementsmod.WORKING_DIR_REQUIREMENTS:
+ continue
+ if req in source_requirements:
+ continue
+ to_drop.add(req)
+ requirements -= to_drop
+ requirements |= source_requirements
+
+ if createopts.get(b'sharedrelative'):
+ requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
+ else:
+ requirements.add(requirementsmod.SHARED_REQUIREMENT)
+
+ if ui.configbool(b'format', b'use-dirstate-tracked-hint'):
+ version = ui.configint(b'format', b'use-dirstate-tracked-hint.version')
+ msg = _("ignoring unknown tracked key version: %d\n")
+ hint = _("see `hg help config.format.use-dirstate-tracked-hint-version")
+ if version != 1:
+ ui.warn(msg % version, hint=hint)
+ else:
+ requirements.add(requirementsmod.DIRSTATE_TRACKED_HINT_V1)
+
return requirements
@@ -3685,7 +3716,7 @@
dropped = set()
if requirementsmod.STORE_REQUIREMENT not in requirements:
- if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
+ if requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
ui.warn(
_(
b'ignoring enabled \'format.bookmarks-in-store\' config '
@@ -3693,7 +3724,7 @@
b'\'format.usestore\' config\n'
)
)
- dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
+ dropped.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
if (
requirementsmod.SHARED_REQUIREMENT in requirements
@@ -3707,13 +3738,13 @@
)
if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
- ui.warn(
- _(
+ if ui.hasconfig(b'format', b'use-share-safe'):
+ msg = _(
b"ignoring enabled 'format.use-share-safe' config because "
b"it is incompatible with disabled 'format.usestore'"
b" config\n"
)
- )
+ ui.warn(msg)
dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
return dropped
--- a/mercurial/logcmdutil.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/logcmdutil.py Fri Feb 18 14:27:43 2022 +0100
@@ -62,9 +62,9 @@
try:
limit = int(limit)
except ValueError:
- raise error.Abort(_(b'limit must be a positive integer'))
+ raise error.InputError(_(b'limit must be a positive integer'))
if limit <= 0:
- raise error.Abort(_(b'limit must be positive'))
+ raise error.InputError(_(b'limit must be positive'))
else:
limit = None
return limit
@@ -831,7 +831,7 @@
# take the slow path.
found = slowpath = True
if not found:
- raise error.Abort(
+ raise error.StateError(
_(
b'cannot follow file not in any of the specified '
b'revisions: "%s"'
@@ -847,7 +847,7 @@
slowpath = True
continue
else:
- raise error.Abort(
+ raise error.StateError(
_(
b'cannot follow file not in parent '
b'revision: "%s"'
@@ -858,7 +858,7 @@
if not filelog:
# A file exists in wdir but not in history, which means
# the file isn't committed yet.
- raise error.Abort(
+ raise error.StateError(
_(b'cannot follow nonexistent file: "%s"') % f
)
else:
@@ -1108,11 +1108,13 @@
try:
pat, linerange = pat.rsplit(b',', 1)
except ValueError:
- raise error.Abort(_(b'malformatted line-range pattern %s') % pat)
+ raise error.InputError(
+ _(b'malformatted line-range pattern %s') % pat
+ )
try:
fromline, toline = map(int, linerange.split(b':'))
except ValueError:
- raise error.Abort(_(b"invalid line range for %s") % pat)
+ raise error.InputError(_(b"invalid line range for %s") % pat)
msg = _(b"line range pattern '%s' must match exactly one file") % pat
fname = scmutil.parsefollowlinespattern(repo, None, pat, msg)
linerangebyfname.append(
@@ -1136,7 +1138,7 @@
linerangesbyrev = {}
for fname, (fromline, toline) in _parselinerangeopt(repo, opts):
if fname not in wctx:
- raise error.Abort(
+ raise error.StateError(
_(b'cannot follow file not in parent revision: "%s"') % fname
)
fctx = wctx.filectx(fname)
@@ -1271,7 +1273,7 @@
def checkunsupportedgraphflags(pats, opts):
for op in [b"newest_first"]:
if op in opts and opts[op]:
- raise error.Abort(
+ raise error.InputError(
_(b"-G/--graph option is incompatible with --%s")
% op.replace(b"_", b"-")
)
--- a/mercurial/manifest.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/manifest.py Fri Feb 18 14:27:43 2022 +0100
@@ -1819,8 +1819,8 @@
def checksize(self):
return self._revlog.checksize()
- def revision(self, node, _df=None, raw=False):
- return self._revlog.revision(node, _df=_df, raw=raw)
+ def revision(self, node, _df=None):
+ return self._revlog.revision(node, _df=_df)
def rawdata(self, node, _df=None):
return self._revlog.rawdata(node, _df=_df)
--- a/mercurial/mdiff.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/mdiff.py Fri Feb 18 14:27:43 2022 +0100
@@ -84,7 +84,7 @@
try:
self.context = int(self.context)
except ValueError:
- raise error.Abort(
+ raise error.InputError(
_(b'diff context lines count must be an integer, not %r')
% pycompat.bytestr(self.context)
)
--- a/mercurial/merge.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/merge.py Fri Feb 18 14:27:43 2022 +0100
@@ -41,10 +41,9 @@
valid = [b'abort', b'ignore', b'warn']
if config not in valid:
validstr = b', '.join([b"'" + v + b"'" for v in valid])
- raise error.ConfigError(
- _(b"%s.%s not valid ('%s' is none of %s)")
- % (section, name, config, validstr)
- )
+ msg = _(b"%s.%s not valid ('%s' is none of %s)")
+ msg %= (section, name, config, validstr)
+ raise error.ConfigError(msg)
return config
@@ -337,10 +336,9 @@
for f in pmmf:
fold = util.normcase(f)
if fold in foldmap:
- raise error.StateError(
- _(b"case-folding collision between %s and %s")
- % (f, foldmap[fold])
- )
+ msg = _(b"case-folding collision between %s and %s")
+ msg %= (f, foldmap[fold])
+ raise error.StateError(msg)
foldmap[fold] = f
# check case-folding of directories
@@ -348,10 +346,9 @@
for fold, f in sorted(foldmap.items()):
if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
# the folded prefix matches but actual casing is different
- raise error.StateError(
- _(b"case-folding collision between %s and directory of %s")
- % (lastfull, f)
- )
+ msg = _(b"case-folding collision between %s and directory of %s")
+ msg %= (lastfull, f)
+ raise error.StateError(msg)
foldprefix = fold + b'/'
unfoldprefix = f + b'/'
lastfull = f
@@ -491,7 +488,7 @@
mresult.addfile(
p,
mergestatemod.ACTION_PATH_CONFLICT,
- (pnew, mergestatemod.ACTION_REMOVE),
+ (pnew, b'r'),
b'path conflict',
)
remoteconflicts.remove(p)
@@ -512,17 +509,6 @@
Raise an exception if the merge cannot be completed because the repo is
narrowed.
"""
- # TODO: handle with nonconflicttypes
- nonconflicttypes = {
- mergestatemod.ACTION_ADD,
- mergestatemod.ACTION_ADD_MODIFIED,
- mergestatemod.ACTION_CREATED,
- mergestatemod.ACTION_CREATED_MERGE,
- mergestatemod.ACTION_FORGET,
- mergestatemod.ACTION_GET,
- mergestatemod.ACTION_REMOVE,
- mergestatemod.ACTION_EXEC,
- }
# We mutate the items in the dict during iteration, so iterate
# over a copy.
for f, action in mresult.filemap():
@@ -530,21 +516,25 @@
pass
elif not branchmerge:
mresult.removefile(f) # just updating, ignore changes outside clone
- elif action[0] in mergestatemod.NO_OP_ACTIONS:
+ elif action[0].no_op:
mresult.removefile(f) # merge does not affect file
- elif action[0] in nonconflicttypes:
- raise error.Abort(
- _(
+ elif action[0].narrow_safe:
+ if not f.endswith(b'/'):
+ mresult.removefile(f) # merge won't affect on-disk files
+
+ mresult.addcommitinfo(
+ f, b'outside-narrow-merge-action', action[0].changes
+ )
+ else: # TODO: handle the tree case
+ msg = _(
b'merge affects file \'%s\' outside narrow, '
b'which is not yet supported'
)
- % f,
- hint=_(b'merging in the other direction may work'),
- )
+ hint = _(b'merging in the other direction may work')
+ raise error.Abort(msg % f, hint=hint)
else:
- raise error.Abort(
- _(b'conflict in file \'%s\' is outside narrow clone') % f
- )
+ msg = _(b'conflict in file \'%s\' is outside narrow clone')
+ raise error.StateError(msg % f)
class mergeresult(object):
@@ -705,7 +695,7 @@
mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
)
and self._actionmapping[a]
- and a not in mergestatemod.NO_OP_ACTIONS
+ and not a.no_op
):
return True
@@ -1207,7 +1197,7 @@
for f, a in mresult1.filemap(sort=True):
m, args, msg = a
- repo.ui.debug(b' %s: %s -> %s\n' % (f, msg, m))
+ repo.ui.debug(b' %s: %s -> %s\n' % (f, msg, m.__bytes__()))
if f in fbids:
d = fbids[f]
if m in d:
@@ -1228,13 +1218,15 @@
repo.ui.debug(b" list of bids for %s:\n" % f)
for m, l in sorted(bids.items()):
for _f, args, msg in l:
- repo.ui.debug(b' %s -> %s\n' % (msg, m))
+ repo.ui.debug(b' %s -> %s\n' % (msg, m.__bytes__()))
# bids is a mapping from action method to list af actions
# Consensus?
if len(bids) == 1: # all bids are the same kind of method
m, l = list(bids.items())[0]
if all(a == l[0] for a in l[1:]): # len(bids) is > 1
- repo.ui.note(_(b" %s: consensus for %s\n") % (f, m))
+ repo.ui.note(
+ _(b" %s: consensus for %s\n") % (f, m.__bytes__())
+ )
mresult.addfile(f, *l[0])
continue
# If keep is an option, just do it.
@@ -1292,11 +1284,12 @@
repo.ui.note(_(b' %s: multiple bids for merge action:\n') % f)
for m, l in sorted(bids.items()):
for _f, args, msg in l:
- repo.ui.note(b' %s -> %s\n' % (msg, m))
+ repo.ui.note(b' %s -> %s\n' % (msg, m.__bytes__()))
# Pick random action. TODO: Instead, prompt user when resolving
m, l = list(bids.items())[0]
repo.ui.warn(
- _(b' %s: ambiguous merge - picked %s action\n') % (f, m)
+ _(b' %s: ambiguous merge - picked %s action\n')
+ % (f, m.__bytes__())
)
mresult.addfile(f, *l[0])
continue
@@ -1404,6 +1397,34 @@
atomictemp=atomictemp,
)
if wantfiledata:
+ # XXX note that there is a race window between the time we
+ # write the clean data into the file and we stat it. So another
+ # writing process meddling with the file content right after we
+ # wrote it could cause bad stat data to be gathered.
+ #
+ # There are 2 pieces of data we gather here
+ # - the mode:
+ # That we actually just wrote, we should not need to read
+ # it from disk, (except not all modes might have survived
+ # the disk round-trip, which is another issue: we should
+ # not depend on this)
+ # - the mtime,
+ # On systems that support nanosecond precision, the mtime
+ # could be accurate enough to tell the two writes apart.
+ # However gathering it in a racy way makes the mtime we
+ # gather "unreliable".
+ #
+ # (note: we get the size from the data we write, which is sane)
+ #
+ # So in theory the data returned here are fully racy, but in
+ # practice "it works mostly fine".
+ #
+ # Do not be surprised if you end up reading this while looking
+ # for the causes of some buggy status. Feel free to improve
+ # this in the future, but we cannot simply stop gathering
+ # information. Otherwise `hg status` calls made after a large `hg
+ # update` run would have to redo a similar amount of work to
+ # restore and compare all files content.
s = wfctx.lstat()
mode = s.st_mode
mtime = timestamp.mtime_of(s)
@@ -1495,7 +1516,8 @@
# mergestate so that it can be reused on commit
ms.addcommitinfo(f, op)
- numupdates = mresult.len() - mresult.len(mergestatemod.NO_OP_ACTIONS)
+ num_no_op = mresult.len(mergestatemod.MergeAction.NO_OP_ACTIONS)
+ numupdates = mresult.len() - num_no_op
progress = repo.ui.makeprogress(
_(b'updating'), unit=_(b'files'), total=numupdates
)
@@ -1599,9 +1621,9 @@
progress.increment(item=f)
# keep (noop, just log it)
- for a in mergestatemod.NO_OP_ACTIONS:
+ for a in mergestatemod.MergeAction.NO_OP_ACTIONS:
for f, args, msg in mresult.getactions((a,), sort=True):
- repo.ui.debug(b" %s: %s -> %s\n" % (f, msg, a))
+ repo.ui.debug(b" %s: %s -> %s\n" % (f, msg, a.__bytes__()))
# no progress
# directory rename, move local
@@ -1690,10 +1712,8 @@
)
try:
- # premerge
- tocomplete = []
for f, args, msg in mergeactions:
- repo.ui.debug(b" %s: %s -> m (premerge)\n" % (f, msg))
+ repo.ui.debug(b" %s: %s -> m\n" % (f, msg))
ms.addcommitinfo(f, {b'merged': b'yes'})
progress.increment(item=f)
if f == b'.hgsubstate': # subrepo states need updating
@@ -1702,16 +1722,6 @@
)
continue
wctx[f].audit()
- complete, r = ms.preresolve(f, wctx)
- if not complete:
- numupdates += 1
- tocomplete.append((f, args, msg))
-
- # merge
- for f, args, msg in tocomplete:
- repo.ui.debug(b" %s: %s -> m (merge)\n" % (f, msg))
- ms.addcommitinfo(f, {b'merged': b'yes'})
- progress.increment(item=f, total=numupdates)
ms.resolve(f, wctx)
except error.InterventionRequired:
@@ -1823,7 +1833,7 @@
If false, merging with an ancestor (fast-forward) is only allowed
between different named branches. This flag is used by rebase extension
as a temporary fix and should be avoided in general.
- labels = labels to use for base, local and other
+ labels = labels to use for local, other, and base
mergeforce = whether the merge was run with 'merge --force' (deprecated): if
this is True, then 'force' should be True as well.
@@ -1875,22 +1885,11 @@
# updatecheck='abort' to better suppport some of these callers.
if updatecheck is None:
updatecheck = UPDATECHECK_LINEAR
- if updatecheck not in (
- UPDATECHECK_NONE,
- UPDATECHECK_LINEAR,
- UPDATECHECK_NO_CONFLICT,
- ):
- raise ValueError(
- r'Invalid updatecheck %r (can accept %r)'
- % (
- updatecheck,
- (
- UPDATECHECK_NONE,
- UPDATECHECK_LINEAR,
- UPDATECHECK_NO_CONFLICT,
- ),
- )
- )
+ okay = (UPDATECHECK_NONE, UPDATECHECK_LINEAR, UPDATECHECK_NO_CONFLICT)
+ if updatecheck not in okay:
+ msg = r'Invalid updatecheck %r (can accept %r)'
+ msg %= (updatecheck, okay)
+ raise ValueError(msg)
if wc is not None and wc.isinmemory():
maybe_wlock = util.nullcontextmanager()
else:
@@ -1919,29 +1918,22 @@
raise error.StateError(_(b"outstanding uncommitted merge"))
ms = wc.mergestate()
if ms.unresolvedcount():
- raise error.StateError(
- _(b"outstanding merge conflicts"),
- hint=_(b"use 'hg resolve' to resolve"),
- )
+ msg = _(b"outstanding merge conflicts")
+ hint = _(b"use 'hg resolve' to resolve")
+ raise error.StateError(msg, hint=hint)
if branchmerge:
+ m_a = _(b"merging with a working directory ancestor has no effect")
if pas == [p2]:
- raise error.Abort(
- _(
- b"merging with a working directory ancestor"
- b" has no effect"
- )
- )
+ raise error.Abort(m_a)
elif pas == [p1]:
if not mergeancestor and wc.branch() == p2.branch():
- raise error.Abort(
- _(b"nothing to merge"),
- hint=_(b"use 'hg update' or check 'hg heads'"),
- )
+ msg = _(b"nothing to merge")
+ hint = _(b"use 'hg update' or check 'hg heads'")
+ raise error.Abort(msg, hint=hint)
if not force and (wc.files() or wc.deleted()):
- raise error.StateError(
- _(b"uncommitted changes"),
- hint=_(b"use 'hg status' to list changes"),
- )
+ msg = _(b"uncommitted changes")
+ hint = _(b"use 'hg status' to list changes")
+ raise error.StateError(msg, hint=hint)
if not wc.isinmemory():
for s in sorted(wc.substate):
wc.sub(s).bailifchanged()
@@ -2144,6 +2136,71 @@
mresult.len((mergestatemod.ACTION_GET,)) if wantfiledata else 0
)
with repo.dirstate.parentchange():
+ ### Filter Filedata
+ #
+ # We gathered "cache" information for the clean file while
+ # updating them: mtime, size and mode.
+ #
+ # At the time this comment is written, they are various issues
+ # with how we gather the `mode` and `mtime` information (see
+ # the comment in `batchget`).
+ #
+ # We are going to smooth over one of these issues here: mtime ambiguity.
+ #
+ # i.e. even if the mtime gathered during `batchget` was
+ # correct[1] a change happening right after it could change the
+ # content while keeping the same mtime[2].
+ #
+ # When we reach the current code, the "on disk" part of the
+ # update operation is finished. We still assume that no other
+ # process raced that "on disk" part, but we want to at least
+ # prevent a later file change from altering the content of the
+ # file right after the update operation, so quickly that the
+ # same mtime is recorded for the operation.
+ # To prevent such ambiguity from happening, we will only keep
+ # the "file data" for files with an mtime strictly in the past,
+ # i.e. whose mtime is strictly lower than the current time.
+ #
+ # This protects us from race conditions from operations that
+ # could run right after this one, especially other Mercurial
+ # operations that could be waiting for the wlock to touch files
+ # content and the dirstate.
+ #
+ # In an ideal world, we could only get reliable information in
+ # `getfiledata` (from `getbatch`), however the current approach
+ # have been a successful compromise since many years.
+ #
+ # At the time this comment is written, not using any "cache"
+ # file data at all here would not be viable. As it would result in
+ # a very large amount of work (equivalent to the previous `hg
+ # update` during the next status after an update).
+ #
+ # [1] the current code cannot guarantee that the `mtime` and
+ # `mode` are correct, but the result is "okay in practice".
+ # (see the comment in `batchget`).
+ #
+ # [2] using nano-second precision can greatly help here because
+ # it makes the "different write with same mtime" issue
+ # virtually vanish. However, dirstate v1 cannot store such
+ # precision and a bunch of python runtimes, operating systems and
+ # filesystems do not provide us with such precision, so we
+ # have to operate as if it wasn't available.
+ if getfiledata:
+ ambiguous_mtime = {}
+ now = timestamp.get_fs_now(repo.vfs)
+ if now is None:
+ # we can't write to the FS, so we won't actually update
+ # the dirstate content anyway, no need to put cache
+ # information.
+ getfiledata = None
+ else:
+ now_sec = now[0]
+ for f, m in pycompat.iteritems(getfiledata):
+ if m is not None and m[2][0] >= now_sec:
+ ambiguous_mtime[f] = (m[0], m[1], None)
+ for f, m in pycompat.iteritems(ambiguous_mtime):
+ getfiledata[f] = m
+
repo.setparents(fp1, fp2)
mergestatemod.recordupdates(
repo, mresult.actionsdict, branchmerge, getfiledata
@@ -2199,7 +2256,7 @@
ctx.rev(),
branchmerge=False,
force=False,
- labels=[b'working copy', b'destination'],
+ labels=[b'working copy', b'destination', b'working copy parent'],
updatecheck=updatecheck,
wc=wc,
)
@@ -2311,9 +2368,8 @@
def back_out(ctx, parent=None, wc=None):
if parent is None:
if ctx.p2() is not None:
- raise error.ProgrammingError(
- b"must specify parent of merge commit to back out"
- )
+ msg = b"must specify parent of merge commit to back out"
+ raise error.ProgrammingError(msg)
parent = ctx.p1()
return _update(
ctx.repo(),
@@ -2386,13 +2442,13 @@
if confirm:
nb_ignored = len(status.ignored)
- nb_unkown = len(status.unknown)
- if nb_unkown and nb_ignored:
- msg = _(b"permanently delete %d unkown and %d ignored files?")
- msg %= (nb_unkown, nb_ignored)
- elif nb_unkown:
- msg = _(b"permanently delete %d unkown files?")
- msg %= nb_unkown
+ nb_unknown = len(status.unknown)
+ if nb_unknown and nb_ignored:
+ msg = _(b"permanently delete %d unknown and %d ignored files?")
+ msg %= (nb_unknown, nb_ignored)
+ elif nb_unknown:
+ msg = _(b"permanently delete %d unknown files?")
+ msg %= nb_unknown
elif nb_ignored:
msg = _(b"permanently delete %d ignored files?")
msg %= nb_ignored
--- a/mercurial/mergestate.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/mergestate.py Fri Feb 18 14:27:43 2022 +0100
@@ -4,6 +4,7 @@
import errno
import shutil
import struct
+import weakref
from .i18n import _
from .node import (
@@ -97,36 +98,102 @@
# This record was release in 3.7 and usage was removed in 5.6
LEGACY_MERGE_DRIVER_MERGE = b'D'
+CHANGE_ADDED = b'added'
+CHANGE_REMOVED = b'removed'
+CHANGE_MODIFIED = b'modified'
-ACTION_FORGET = b'f'
-ACTION_REMOVE = b'r'
-ACTION_ADD = b'a'
-ACTION_GET = b'g'
-ACTION_PATH_CONFLICT = b'p'
-ACTION_PATH_CONFLICT_RESOLVE = b'pr'
-ACTION_ADD_MODIFIED = b'am'
-ACTION_CREATED = b'c'
-ACTION_DELETED_CHANGED = b'dc'
-ACTION_CHANGED_DELETED = b'cd'
-ACTION_MERGE = b'm'
-ACTION_LOCAL_DIR_RENAME_GET = b'dg'
-ACTION_DIR_RENAME_MOVE_LOCAL = b'dm'
-ACTION_KEEP = b'k'
+
+class MergeAction(object):
+ """represents an "action" that merge needs to take for a given file
+
+ Attributes:
+
+ _short: internal representation used to identify each action
+
+ no_op: True if the action does not affect the file content or tracking status
+
+ narrow_safe:
+ True if the action can be safely used for a file outside of the narrow
+ set
+
+ changes:
+ The types of changes that this action involves. This is a work in
+ progress and not all actions have one yet. In addition, some require
+ user changes and cannot be fully decided. The values currently
+ available are:
+
+ - ADDED: the file is new in both parents
+ - REMOVED: the file existed in one parent and is getting removed
+ - MODIFIED: the file existed in at least one parent and is getting changed
+ """
+
+ ALL_ACTIONS = weakref.WeakSet()
+ NO_OP_ACTIONS = weakref.WeakSet()
+
+ def __init__(self, short, no_op=False, narrow_safe=False, changes=None):
+ self._short = short
+ self.ALL_ACTIONS.add(self)
+ self.no_op = no_op
+ if self.no_op:
+ self.NO_OP_ACTIONS.add(self)
+ self.narrow_safe = narrow_safe
+ self.changes = changes
+
+ def __hash__(self):
+ return hash(self._short)
+
+ def __repr__(self):
+ return 'MergeAction<%s>' % self._short.decode('ascii')
+
+ def __bytes__(self):
+ return self._short
+
+ def __eq__(self, other):
+ if other is None:
+ return False
+ assert isinstance(other, MergeAction)
+ return self._short == other._short
+
+ def __lt__(self, other):
+ return self._short < other._short
+
+
+ACTION_FORGET = MergeAction(b'f', narrow_safe=True, changes=CHANGE_REMOVED)
+ACTION_REMOVE = MergeAction(b'r', narrow_safe=True, changes=CHANGE_REMOVED)
+ACTION_ADD = MergeAction(b'a', narrow_safe=True, changes=CHANGE_ADDED)
+ACTION_GET = MergeAction(b'g', narrow_safe=True, changes=CHANGE_MODIFIED)
+ACTION_PATH_CONFLICT = MergeAction(b'p')
+ACTION_PATH_CONFLICT_RESOLVE = MergeAction('pr')
+ACTION_ADD_MODIFIED = MergeAction(
+ b'am', narrow_safe=True, changes=CHANGE_ADDED
+) # not 100% about the changes value here
+ACTION_CREATED = MergeAction(b'c', narrow_safe=True, changes=CHANGE_ADDED)
+ACTION_DELETED_CHANGED = MergeAction(b'dc')
+ACTION_CHANGED_DELETED = MergeAction(b'cd')
+ACTION_MERGE = MergeAction(b'm')
+ACTION_LOCAL_DIR_RENAME_GET = MergeAction(b'dg')
+ACTION_DIR_RENAME_MOVE_LOCAL = MergeAction(b'dm')
+ACTION_KEEP = MergeAction(b'k', no_op=True)
# the file was absent on local side before merge and we should
# keep it absent (absent means file not present, it can be a result
# of file deletion, rename etc.)
-ACTION_KEEP_ABSENT = b'ka'
+ACTION_KEEP_ABSENT = MergeAction(b'ka', no_op=True)
# the file is absent on the ancestor and remote side of the merge
# hence this file is new and we should keep it
-ACTION_KEEP_NEW = b'kn'
-ACTION_EXEC = b'e'
-ACTION_CREATED_MERGE = b'cm'
+ACTION_KEEP_NEW = MergeAction(b'kn', no_op=True)
+ACTION_EXEC = MergeAction(b'e', narrow_safe=True, changes=CHANGE_MODIFIED)
+ACTION_CREATED_MERGE = MergeAction(
+ b'cm', narrow_safe=True, changes=CHANGE_ADDED
+)
+
-# actions which are no op
-NO_OP_ACTIONS = (
- ACTION_KEEP,
- ACTION_KEEP_ABSENT,
- ACTION_KEEP_NEW,
+# Used by convert to detect situations it does not like, not sure what the
+# exact criteria are
+CONVERT_MERGE_ACTIONS = (
+ ACTION_MERGE,
+ ACTION_DIR_RENAME_MOVE_LOCAL,
+ ACTION_CHANGED_DELETED,
+ ACTION_DELETED_CHANGED,
)
@@ -313,16 +380,15 @@
"""return extras stored with the mergestate for the given filename"""
return self._stateextras[filename]
- def _resolve(self, preresolve, dfile, wctx):
- """rerun merge process for file path `dfile`.
- Returns whether the merge was completed and the return value of merge
- obtained from filemerge._filemerge().
- """
+ def resolve(self, dfile, wctx):
+ """run merge process for dfile
+
+ Returns the exit code of the merge."""
if self[dfile] in (
MERGE_RECORD_RESOLVED,
LEGACY_RECORD_DRIVER_RESOLVED,
):
- return True, 0
+ return 0
stateentry = self._state[dfile]
state, localkey, lfile, afile, anode, ofile, onode, flags = stateentry
octx = self._repo[self._other]
@@ -341,84 +407,63 @@
fla = fca.flags()
if b'x' in flags + flo + fla and b'l' not in flags + flo + fla:
if fca.rev() == nullrev and flags != flo:
- if preresolve:
- self._repo.ui.warn(
- _(
- b'warning: cannot merge flags for %s '
- b'without common ancestor - keeping local flags\n'
- )
- % afile
+ self._repo.ui.warn(
+ _(
+ b'warning: cannot merge flags for %s '
+ b'without common ancestor - keeping local flags\n'
)
+ % afile
+ )
elif flags == fla:
flags = flo
- if preresolve:
- # restore local
- if localkey != self._repo.nodeconstants.nullhex:
- self._restore_backup(wctx[dfile], localkey, flags)
- else:
- wctx[dfile].remove(ignoremissing=True)
- complete, merge_ret, deleted = filemerge.premerge(
- self._repo,
- wctx,
- self._local,
- lfile,
- fcd,
- fco,
- fca,
- labels=self._labels,
- )
+ # restore local
+ if localkey != self._repo.nodeconstants.nullhex:
+ self._restore_backup(wctx[dfile], localkey, flags)
else:
- complete, merge_ret, deleted = filemerge.filemerge(
- self._repo,
- wctx,
- self._local,
- lfile,
- fcd,
- fco,
- fca,
- labels=self._labels,
- )
- if merge_ret is None:
+ wctx[dfile].remove(ignoremissing=True)
+
+ if not fco.cmp(fcd): # files identical?
# If return value of merge is None, then there are no real conflict
del self._state[dfile]
+ self._results[dfile] = None, None
self._dirty = True
- elif not merge_ret:
+ return None
+
+ merge_ret, deleted = filemerge.filemerge(
+ self._repo,
+ wctx,
+ self._local,
+ lfile,
+ fcd,
+ fco,
+ fca,
+ labels=self._labels,
+ )
+
+ if not merge_ret:
self.mark(dfile, MERGE_RECORD_RESOLVED)
- if complete:
- action = None
- if deleted:
- if fcd.isabsent():
- # dc: local picked. Need to drop if present, which may
- # happen on re-resolves.
- action = ACTION_FORGET
- else:
- # cd: remote picked (or otherwise deleted)
- action = ACTION_REMOVE
+ action = None
+ if deleted:
+ if fcd.isabsent():
+ # dc: local picked. Need to drop if present, which may
+ # happen on re-resolves.
+ action = ACTION_FORGET
else:
- if fcd.isabsent(): # dc: remote picked
- action = ACTION_GET
- elif fco.isabsent(): # cd: local picked
- if dfile in self.localctx:
- action = ACTION_ADD_MODIFIED
- else:
- action = ACTION_ADD
- # else: regular merges (no action necessary)
- self._results[dfile] = merge_ret, action
-
- return complete, merge_ret
+ # cd: remote picked (or otherwise deleted)
+ action = ACTION_REMOVE
+ else:
+ if fcd.isabsent(): # dc: remote picked
+ action = ACTION_GET
+ elif fco.isabsent(): # cd: local picked
+ if dfile in self.localctx:
+ action = ACTION_ADD_MODIFIED
+ else:
+ action = ACTION_ADD
+ # else: regular merges (no action necessary)
+ self._results[dfile] = merge_ret, action
- def preresolve(self, dfile, wctx):
- """run premerge process for dfile
-
- Returns whether the merge is complete, and the exit code."""
- return self._resolve(True, dfile, wctx)
-
- def resolve(self, dfile, wctx):
- """run merge process (assuming premerge was run) for dfile
-
- Returns the exit code of the merge."""
- return self._resolve(False, dfile, wctx)[1]
+ return merge_ret
def counts(self):
"""return counts for updated, merged and removed files in this
--- a/mercurial/narrowspec.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/narrowspec.py Fri Feb 18 14:27:43 2022 +0100
@@ -109,23 +109,24 @@
and patterns that are loaded from sources that use the internal,
prefixed pattern representation (but can't necessarily be fully trusted).
"""
- if not isinstance(pats, set):
- raise error.ProgrammingError(
- b'narrow patterns should be a set; got %r' % pats
- )
+ with util.timedcm('narrowspec.validatepatterns(pats size=%d)', len(pats)):
+ if not isinstance(pats, set):
+ raise error.ProgrammingError(
+ b'narrow patterns should be a set; got %r' % pats
+ )
- for pat in pats:
- if not pat.startswith(VALID_PREFIXES):
- # Use a Mercurial exception because this can happen due to user
- # bugs (e.g. manually updating spec file).
- raise error.Abort(
- _(b'invalid prefix on narrow pattern: %s') % pat,
- hint=_(
- b'narrow patterns must begin with one of '
- b'the following: %s'
+ for pat in pats:
+ if not pat.startswith(VALID_PREFIXES):
+ # Use a Mercurial exception because this can happen due to user
+ # bugs (e.g. manually updating spec file).
+ raise error.Abort(
+ _(b'invalid prefix on narrow pattern: %s') % pat,
+ hint=_(
+ b'narrow patterns must begin with one of '
+ b'the following: %s'
+ )
+ % b', '.join(VALID_PREFIXES),
)
- % b', '.join(VALID_PREFIXES),
- )
def format(includes, excludes):
@@ -323,7 +324,7 @@
removedmatch = matchmod.differencematcher(oldmatch, newmatch)
ds = repo.dirstate
- lookup, status = ds.status(
+ lookup, status, _mtime_boundary = ds.status(
removedmatch, subrepos=[], ignored=True, clean=True, unknown=True
)
trackeddirty = status.modified + status.added
--- a/mercurial/obsolete.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/obsolete.py Fri Feb 18 14:27:43 2022 +0100
@@ -73,10 +73,6 @@
import struct
from .i18n import _
-from .node import (
- bin,
- hex,
-)
from .pycompat import getattr
from .node import (
bin,
@@ -579,6 +575,12 @@
return len(self._all)
def __nonzero__(self):
+ from . import statichttprepo
+
+ if isinstance(self.repo, statichttprepo.statichttprepository):
+ # If repo is accessed via static HTTP, then we can't use os.stat()
+ # to just peek at the file size.
+ return len(self._data) > 1
if not self._cached('_all'):
try:
return self.svfs.stat(b'obsstore').st_size > 1
@@ -944,8 +946,7 @@
getnode = repo.changelog.node
notpublic = _mutablerevs(repo)
isobs = repo.obsstore.successors.__contains__
- obs = {r for r in notpublic if isobs(getnode(r))}
- return obs
+ return frozenset(r for r in notpublic if isobs(getnode(r)))
@cachefor(b'orphan')
@@ -963,14 +964,14 @@
if p in obsolete or p in unstable:
unstable.add(r)
break
- return unstable
+ return frozenset(unstable)
@cachefor(b'suspended')
def _computesuspendedset(repo):
"""the set of obsolete parents with non obsolete descendants"""
suspended = repo.changelog.ancestors(getrevs(repo, b'orphan'))
- return {r for r in getrevs(repo, b'obsolete') if r in suspended}
+ return frozenset(r for r in getrevs(repo, b'obsolete') if r in suspended)
@cachefor(b'extinct')
@@ -1002,7 +1003,7 @@
# we have a public predecessor
bumped.add(rev)
break # Next draft!
- return bumped
+ return frozenset(bumped)
@cachefor(b'contentdivergent')
@@ -1029,7 +1030,7 @@
divergent.add(rev)
break
toprocess.update(obsstore.predecessors.get(prec, ()))
- return divergent
+ return frozenset(divergent)
def makefoldid(relation, user):
--- a/mercurial/obsutil.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/obsutil.py Fri Feb 18 14:27:43 2022 +0100
@@ -218,7 +218,7 @@
or
- # (A0 rewritten as AX; AX rewritten as A1; AX is unkown locally)
+ # (A0 rewritten as AX; AX rewritten as A1; AX is unknown locally)
#
# <-1- A0 <-2- AX <-3- A1 # Marker "2,3" are exclusive to A1
--- a/mercurial/patch.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/patch.py Fri Feb 18 14:27:43 2022 +0100
@@ -55,6 +55,8 @@
)
PatchError = error.PatchError
+PatchParseError = error.PatchParseError
+PatchApplicationError = error.PatchApplicationError
# public functions
@@ -107,7 +109,9 @@
def mimesplit(stream, cur):
def msgfp(m):
fp = stringio()
+ # pytype: disable=wrong-arg-types
g = mail.Generator(fp, mangle_from_=False)
+ # pytype: enable=wrong-arg-types
g.flatten(m)
fp.seek(0)
return fp
@@ -553,7 +557,9 @@
if not self.repo.dirstate.get_entry(fname).any_tracked and self.exists(
fname
):
- raise PatchError(_(b'cannot patch %s: file is not tracked') % fname)
+ raise PatchApplicationError(
+ _(b'cannot patch %s: file is not tracked') % fname
+ )
def setfile(self, fname, data, mode, copysource):
self._checkknown(fname)
@@ -637,7 +643,9 @@
def _checkknown(self, fname):
if fname not in self.ctx:
- raise PatchError(_(b'cannot patch %s: file is not tracked') % fname)
+ raise PatchApplicationError(
+ _(b'cannot patch %s: file is not tracked') % fname
+ )
def getfile(self, fname):
try:
@@ -793,7 +801,7 @@
def apply(self, h):
if not h.complete():
- raise PatchError(
+ raise PatchParseError(
_(b"bad hunk #%d %s (%d %d %d %d)")
% (h.number, h.desc, len(h.a), h.lena, len(h.b), h.lenb)
)
@@ -1388,7 +1396,7 @@
def read_unified_hunk(self, lr):
m = unidesc.match(self.desc)
if not m:
- raise PatchError(_(b"bad hunk #%d") % self.number)
+ raise PatchParseError(_(b"bad hunk #%d") % self.number)
self.starta, self.lena, self.startb, self.lenb = m.groups()
if self.lena is None:
self.lena = 1
@@ -1405,7 +1413,7 @@
lr, self.hunk, self.lena, self.lenb, self.a, self.b
)
except error.ParseError as e:
- raise PatchError(_(b"bad hunk #%d: %s") % (self.number, e))
+ raise PatchParseError(_(b"bad hunk #%d: %s") % (self.number, e))
# if we hit eof before finishing out the hunk, the last line will
# be zero length. Lets try to fix it up.
while len(self.hunk[-1]) == 0:
@@ -1420,7 +1428,7 @@
self.desc = lr.readline()
m = contextdesc.match(self.desc)
if not m:
- raise PatchError(_(b"bad hunk #%d") % self.number)
+ raise PatchParseError(_(b"bad hunk #%d") % self.number)
self.starta, aend = m.groups()
self.starta = int(self.starta)
if aend is None:
@@ -1440,7 +1448,7 @@
elif l.startswith(b' '):
u = b' ' + s
else:
- raise PatchError(
+ raise PatchParseError(
_(b"bad hunk #%d old text line %d") % (self.number, x)
)
self.a.append(u)
@@ -1454,7 +1462,7 @@
l = lr.readline()
m = contextdesc.match(l)
if not m:
- raise PatchError(_(b"bad hunk #%d") % self.number)
+ raise PatchParseError(_(b"bad hunk #%d") % self.number)
self.startb, bend = m.groups()
self.startb = int(self.startb)
if bend is None:
@@ -1487,7 +1495,7 @@
lr.push(l)
break
else:
- raise PatchError(
+ raise PatchParseError(
_(b"bad hunk #%d old text line %d") % (self.number, x)
)
self.b.append(s)
@@ -1601,7 +1609,7 @@
while True:
line = getline(lr, self.hunk)
if not line:
- raise PatchError(
+ raise PatchParseError(
_(b'could not extract "%s" binary data') % self._fname
)
if line.startswith(b'literal '):
@@ -1622,14 +1630,14 @@
try:
dec.append(util.b85decode(line[1:])[:l])
except ValueError as e:
- raise PatchError(
+ raise PatchParseError(
_(b'could not decode "%s" binary patch: %s')
% (self._fname, stringutil.forcebytestr(e))
)
line = getline(lr, self.hunk)
text = zlib.decompress(b''.join(dec))
if len(text) != size:
- raise PatchError(
+ raise PatchParseError(
_(b'"%s" length is %d bytes, should be %d')
% (self._fname, len(text), size)
)
@@ -1847,7 +1855,7 @@
try:
p.transitions[state][newstate](p, data)
except KeyError:
- raise PatchError(
+ raise PatchParseError(
b'unhandled transition: %s -> %s' % (state, newstate)
)
state = newstate
@@ -1874,7 +1882,7 @@
('a//b/', 'd/e/c')
>>> pathtransform(b'a/b/c', 3, b'')
Traceback (most recent call last):
- PatchError: unable to strip away 1 of 3 dirs from a/b/c
+ PatchApplicationError: unable to strip away 1 of 3 dirs from a/b/c
"""
pathlen = len(path)
i = 0
@@ -1884,7 +1892,7 @@
while count > 0:
i = path.find(b'/', i)
if i == -1:
- raise PatchError(
+ raise PatchApplicationError(
_(b"unable to strip away %d of %d dirs from %s")
% (count, strip, path)
)
@@ -1947,7 +1955,7 @@
elif not nulla:
fname = afile
else:
- raise PatchError(_(b"undefined source and destination files"))
+ raise PatchParseError(_(b"undefined source and destination files"))
gp = patchmeta(fname)
if create:
@@ -2097,7 +2105,7 @@
gp.copy(),
)
if not gitpatches:
- raise PatchError(
+ raise PatchParseError(
_(b'failed to synchronize metadata for "%s"') % afile[2:]
)
newfile = True
@@ -2193,7 +2201,7 @@
out += binchunk[i:offset_end]
i += cmd
else:
- raise PatchError(_(b'unexpected delta opcode 0'))
+ raise PatchApplicationError(_(b'unexpected delta opcode 0'))
return out
@@ -2270,7 +2278,7 @@
data, mode = store.getfile(gp.oldpath)[:2]
if data is None:
# This means that the old path does not exist
- raise PatchError(
+ raise PatchApplicationError(
_(b"source file '%s' does not exist") % gp.oldpath
)
if gp.mode:
@@ -2283,7 +2291,7 @@
if gp.op in (b'ADD', b'RENAME', b'COPY') and backend.exists(
gp.path
):
- raise PatchError(
+ raise PatchApplicationError(
_(
b"cannot create %s: destination "
b"already exists"
@@ -2365,7 +2373,7 @@
scmutil.marktouched(repo, files, similarity)
code = fp.close()
if code:
- raise PatchError(
+ raise PatchApplicationError(
_(b"patch command failed: %s") % procutil.explainexit(code)
)
return fuzz
@@ -2397,7 +2405,7 @@
files.update(backend.close())
store.close()
if ret < 0:
- raise PatchError(_(b'patch failed to apply'))
+ raise PatchApplicationError(_(b'patch failed to apply'))
return ret > 0
--- a/mercurial/pathutil.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/pathutil.py Fri Feb 18 14:27:43 2022 +0100
@@ -79,20 +79,24 @@
return
# AIX ignores "/" at end of path, others raise EISDIR.
if util.endswithsep(path):
- raise error.Abort(_(b"path ends in directory separator: %s") % path)
+ raise error.InputError(
+ _(b"path ends in directory separator: %s") % path
+ )
parts = util.splitpath(path)
if (
os.path.splitdrive(path)[0]
or _lowerclean(parts[0]) in (b'.hg', b'.hg.', b'')
or pycompat.ospardir in parts
):
- raise error.Abort(_(b"path contains illegal component: %s") % path)
+ raise error.InputError(
+ _(b"path contains illegal component: %s") % path
+ )
# Windows shortname aliases
for p in parts:
if b"~" in p:
first, last = p.split(b"~", 1)
if last.isdigit() and first.upper() in [b"HG", b"HG8B6C"]:
- raise error.Abort(
+ raise error.InputError(
_(b"path contains illegal component: %s") % path
)
if b'.hg' in _lowerclean(path):
@@ -101,7 +105,7 @@
if p in lparts[1:]:
pos = lparts.index(p)
base = os.path.join(*parts[:pos])
- raise error.Abort(
+ raise error.InputError(
_(b"path '%s' is inside nested repo %r")
% (path, pycompat.bytestr(base))
)
--- a/mercurial/pure/parsers.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/pure/parsers.py Fri Feb 18 14:27:43 2022 +0100
@@ -104,6 +104,7 @@
_mtime_ns = attr.ib()
_fallback_exec = attr.ib()
_fallback_symlink = attr.ib()
+ _mtime_second_ambiguous = attr.ib()
def __init__(
self,
@@ -127,24 +128,27 @@
self._size = None
self._mtime_s = None
self._mtime_ns = None
+ self._mtime_second_ambiguous = False
if parentfiledata is None:
has_meaningful_mtime = False
has_meaningful_data = False
+ elif parentfiledata[2] is None:
+ has_meaningful_mtime = False
if has_meaningful_data:
self._mode = parentfiledata[0]
self._size = parentfiledata[1]
if has_meaningful_mtime:
- self._mtime_s, self._mtime_ns = parentfiledata[2]
+ (
+ self._mtime_s,
+ self._mtime_ns,
+ self._mtime_second_ambiguous,
+ ) = parentfiledata[2]
@classmethod
def from_v2_data(cls, flags, size, mtime_s, mtime_ns):
"""Build a new DirstateItem object from V2 data"""
has_mode_size = bool(flags & DIRSTATE_V2_HAS_MODE_AND_SIZE)
has_meaningful_mtime = bool(flags & DIRSTATE_V2_HAS_MTIME)
- if flags & DIRSTATE_V2_MTIME_SECOND_AMBIGUOUS:
- # The current code is not able to do the more subtle comparison that the
- # MTIME_SECOND_AMBIGUOUS requires. So we ignore the mtime
- has_meaningful_mtime = False
mode = None
if flags & +DIRSTATE_V2_EXPECTED_STATE_IS_MODIFIED:
@@ -171,13 +175,15 @@
mode |= stat.S_IFLNK
else:
mode |= stat.S_IFREG
+
+ second_ambiguous = flags & DIRSTATE_V2_MTIME_SECOND_AMBIGUOUS
return cls(
wc_tracked=bool(flags & DIRSTATE_V2_WDIR_TRACKED),
p1_tracked=bool(flags & DIRSTATE_V2_P1_TRACKED),
p2_info=bool(flags & DIRSTATE_V2_P2_INFO),
has_meaningful_data=has_mode_size,
has_meaningful_mtime=has_meaningful_mtime,
- parentfiledata=(mode, size, (mtime_s, mtime_ns)),
+ parentfiledata=(mode, size, (mtime_s, mtime_ns, second_ambiguous)),
fallback_exec=fallback_exec,
fallback_symlink=fallback_symlink,
)
@@ -214,13 +220,13 @@
wc_tracked=True,
p1_tracked=True,
has_meaningful_mtime=False,
- parentfiledata=(mode, size, (42, 0)),
+ parentfiledata=(mode, size, (42, 0, False)),
)
else:
return cls(
wc_tracked=True,
p1_tracked=True,
- parentfiledata=(mode, size, (mtime, 0)),
+ parentfiledata=(mode, size, (mtime, 0, False)),
)
else:
raise RuntimeError(b'unknown state: %s' % state)
@@ -246,7 +252,7 @@
self._p1_tracked = True
self._mode = mode
self._size = size
- self._mtime_s, self._mtime_ns = mtime
+ self._mtime_s, self._mtime_ns, self._mtime_second_ambiguous = mtime
def set_tracked(self):
"""mark a file as tracked in the working copy
@@ -301,10 +307,22 @@
if self_sec is None:
return False
self_ns = self._mtime_ns
- other_sec, other_ns = other_mtime
- return self_sec == other_sec and (
- self_ns == other_ns or self_ns == 0 or other_ns == 0
- )
+ other_sec, other_ns, second_ambiguous = other_mtime
+ if self_sec != other_sec:
+            # seconds are different, these mtimes are definitely not equal
+ return False
+ elif other_ns == 0 or self_ns == 0:
+            # at least one side has no nano-second information
+
+ if self._mtime_second_ambiguous:
+ # We cannot trust the mtime in this case
+ return False
+ else:
+ # the "seconds" value was reliable on its own. We are good to go.
+ return True
+ else:
+ # We have nano second information, let us use them !
+ return self_ns == other_ns
@property
def state(self):
@@ -463,6 +481,8 @@
flags |= DIRSTATE_V2_MODE_IS_SYMLINK
if self._mtime_s is not None:
flags |= DIRSTATE_V2_HAS_MTIME
+ if self._mtime_second_ambiguous:
+ flags |= DIRSTATE_V2_MTIME_SECOND_AMBIGUOUS
if self._fallback_exec is not None:
flags |= DIRSTATE_V2_HAS_FALLBACK_EXEC
@@ -531,13 +551,11 @@
return AMBIGUOUS_TIME
elif not self._p1_tracked:
return AMBIGUOUS_TIME
+ elif self._mtime_second_ambiguous:
+ return AMBIGUOUS_TIME
else:
return self._mtime_s
- def need_delay(self, now):
- """True if the stored mtime would be ambiguous with the current time"""
- return self.v1_state() == b'n' and self._mtime_s == now[0]
-
def gettype(q):
return int(q & 0xFFFF)
@@ -566,18 +584,13 @@
0,
revlog_constants.COMP_MODE_INLINE,
revlog_constants.COMP_MODE_INLINE,
+ revlog_constants.RANK_UNKNOWN,
)
@util.propertycache
def entry_size(self):
return self.index_format.size
- @property
- def nodemap(self):
- msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
- util.nouideprecwarn(msg, b'5.3', stacklevel=2)
- return self._nodemap
-
@util.propertycache
def _nodemap(self):
nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
@@ -629,7 +642,7 @@
if not isinstance(i, int):
raise TypeError(b"expecting int indexes")
if i < 0 or i >= len(self):
- raise IndexError
+ raise IndexError(i)
def __getitem__(self, i):
if i == -1:
@@ -653,6 +666,7 @@
0,
revlog_constants.COMP_MODE_INLINE,
revlog_constants.COMP_MODE_INLINE,
+ revlog_constants.RANK_UNKNOWN,
)
return r
@@ -785,9 +799,14 @@
return self._offsets[i]
-def parse_index2(data, inline, revlogv2=False):
+def parse_index2(data, inline, format=revlog_constants.REVLOGV1):
+ if format == revlog_constants.CHANGELOGV2:
+ return parse_index_cl_v2(data)
if not inline:
- cls = IndexObject2 if revlogv2 else IndexObject
+ if format == revlog_constants.REVLOGV2:
+ cls = IndexObject2
+ else:
+ cls = IndexObject
return cls(data), None
cls = InlinedIndexObject
return cls(data, inline), (0, data)
@@ -835,7 +854,7 @@
entry = data[:10]
data_comp = data[10] & 3
sidedata_comp = (data[10] & (3 << 2)) >> 2
- return entry + (data_comp, sidedata_comp)
+ return entry + (data_comp, sidedata_comp, revlog_constants.RANK_UNKNOWN)
def _pack_entry(self, rev, entry):
data = entry[:10]
@@ -860,20 +879,53 @@
class IndexChangelogV2(IndexObject2):
index_format = revlog_constants.INDEX_ENTRY_CL_V2
+ null_item = (
+ IndexObject2.null_item[: revlog_constants.ENTRY_RANK]
+ + (0,) # rank of null is 0
+ + IndexObject2.null_item[revlog_constants.ENTRY_RANK :]
+ )
+
def _unpack_entry(self, rev, data, r=True):
items = self.index_format.unpack(data)
- entry = items[:3] + (rev, rev) + items[3:8]
- data_comp = items[8] & 3
- sidedata_comp = (items[8] >> 2) & 3
- return entry + (data_comp, sidedata_comp)
+ return (
+ items[revlog_constants.INDEX_ENTRY_V2_IDX_OFFSET],
+ items[revlog_constants.INDEX_ENTRY_V2_IDX_COMPRESSED_LENGTH],
+ items[revlog_constants.INDEX_ENTRY_V2_IDX_UNCOMPRESSED_LENGTH],
+ rev,
+ rev,
+ items[revlog_constants.INDEX_ENTRY_V2_IDX_PARENT_1],
+ items[revlog_constants.INDEX_ENTRY_V2_IDX_PARENT_2],
+ items[revlog_constants.INDEX_ENTRY_V2_IDX_NODEID],
+ items[revlog_constants.INDEX_ENTRY_V2_IDX_SIDEDATA_OFFSET],
+ items[
+ revlog_constants.INDEX_ENTRY_V2_IDX_SIDEDATA_COMPRESSED_LENGTH
+ ],
+ items[revlog_constants.INDEX_ENTRY_V2_IDX_COMPRESSION_MODE] & 3,
+ (items[revlog_constants.INDEX_ENTRY_V2_IDX_COMPRESSION_MODE] >> 2)
+ & 3,
+ items[revlog_constants.INDEX_ENTRY_V2_IDX_RANK],
+ )
def _pack_entry(self, rev, entry):
- assert entry[3] == rev, entry[3]
- assert entry[4] == rev, entry[4]
- data = entry[:3] + entry[5:10]
- data_comp = entry[10] & 3
- sidedata_comp = (entry[11] & 3) << 2
- data += (data_comp | sidedata_comp,)
+
+ base = entry[revlog_constants.ENTRY_DELTA_BASE]
+ link_rev = entry[revlog_constants.ENTRY_LINK_REV]
+ assert base == rev, (base, rev)
+ assert link_rev == rev, (link_rev, rev)
+ data = (
+ entry[revlog_constants.ENTRY_DATA_OFFSET],
+ entry[revlog_constants.ENTRY_DATA_COMPRESSED_LENGTH],
+ entry[revlog_constants.ENTRY_DATA_UNCOMPRESSED_LENGTH],
+ entry[revlog_constants.ENTRY_PARENT_1],
+ entry[revlog_constants.ENTRY_PARENT_2],
+ entry[revlog_constants.ENTRY_NODE_ID],
+ entry[revlog_constants.ENTRY_SIDEDATA_OFFSET],
+ entry[revlog_constants.ENTRY_SIDEDATA_COMPRESSED_LENGTH],
+ entry[revlog_constants.ENTRY_DATA_COMPRESSION_MODE] & 3
+ | (entry[revlog_constants.ENTRY_SIDEDATA_COMPRESSION_MODE] & 3)
+ << 2,
+ entry[revlog_constants.ENTRY_RANK],
+ )
return self.index_format.pack(*data)
@@ -903,23 +955,11 @@
return parents
-def pack_dirstate(dmap, copymap, pl, now):
+def pack_dirstate(dmap, copymap, pl):
cs = stringio()
write = cs.write
write(b"".join(pl))
for f, e in pycompat.iteritems(dmap):
- if e.need_delay(now):
- # The file was last modified "simultaneously" with the current
- # write to dirstate (i.e. within the same second for file-
- # systems with a granularity of 1 sec). This commonly happens
- # for at least a couple of files on 'update'.
- # The user could change the file without changing its size
- # within the same second. Invalidate the file's mtime in
- # dirstate, forcing future 'status' calls to compare the
- # contents of the file if the size is the same. This prevents
- # mistakenly treating such files as clean.
- e.set_possibly_dirty()
-
if f in copymap:
f = b"%s\0%s" % (f, copymap[f])
e = _pack(
--- a/mercurial/requirements.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/requirements.py Fri Feb 18 14:27:43 2022 +0100
@@ -7,11 +7,18 @@
from __future__ import absolute_import
+# obsolete experimental requirements:
+# - manifestv2: An experimental new manifest format that allowed
+# for stem compression of long paths. Experiment ended up not
+# being successful (repository sizes went up due to worse delta
+# chains), and the code was deleted in 4.6.
+
GENERALDELTA_REQUIREMENT = b'generaldelta'
DOTENCODE_REQUIREMENT = b'dotencode'
STORE_REQUIREMENT = b'store'
FNCACHE_REQUIREMENT = b'fncache'
+DIRSTATE_TRACKED_HINT_V1 = b'dirstate-tracked-key-v1'
DIRSTATE_V2_REQUIREMENT = b'dirstate-v2'
# When narrowing is finalized and no longer subject to format changes,
@@ -30,6 +37,9 @@
REVLOGV1_REQUIREMENT = b'revlogv1'
+# allow using ZSTD as compression engine for revlog content
+REVLOG_COMPRESSION_ZSTD = b'revlog-compression-zstd'
+
# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
CHANGELOGV2_REQUIREMENT = b'exp-changelog-v2'
@@ -66,6 +76,10 @@
# `.hg/store/requires` are present.
SHARESAFE_REQUIREMENT = b'share-safe'
+# Bookmarks must be stored in the `store` part of the repository and will be
+# shared across shares
+BOOKMARKS_IN_STORE_REQUIREMENT = b'bookmarksinstore'
+
# List of requirements which are working directory specific
# These requirements cannot be shared between repositories if they
# share the same store
@@ -83,5 +97,25 @@
SHARED_REQUIREMENT,
RELATIVE_SHARED_REQUIREMENT,
SHARESAFE_REQUIREMENT,
+ DIRSTATE_TRACKED_HINT_V1,
DIRSTATE_V2_REQUIREMENT,
}
+
+# List of requirement that impact "stream-clone" (and hardlink clone) and
+# cannot be changed in such cases.
+#
+# requirements not in this list are safe to be altered during stream-clone.
+#
+# note: the list is currently inherited from previous code and misses some relevant requirements while containing some irrelevant ones.
+STREAM_FIXED_REQUIREMENTS = {
+ BOOKMARKS_IN_STORE_REQUIREMENT,
+ CHANGELOGV2_REQUIREMENT,
+ COPIESSDC_REQUIREMENT,
+ GENERALDELTA_REQUIREMENT,
+ INTERNAL_PHASE_REQUIREMENT,
+ REVLOG_COMPRESSION_ZSTD,
+ REVLOGV1_REQUIREMENT,
+ REVLOGV2_REQUIREMENT,
+ SPARSEREVLOG_REQUIREMENT,
+ TREEMANIFEST_REQUIREMENT,
+}
--- a/mercurial/revlog.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/revlog.py Fri Feb 18 14:27:43 2022 +0100
@@ -40,11 +40,13 @@
COMP_MODE_DEFAULT,
COMP_MODE_INLINE,
COMP_MODE_PLAIN,
+ ENTRY_RANK,
FEATURES_BY_VERSION,
FLAG_GENERALDELTA,
FLAG_INLINE_DATA,
INDEX_HEADER,
KIND_CHANGELOG,
+ RANK_UNKNOWN,
REVLOGV0,
REVLOGV1,
REVLOGV1_FLAGS,
@@ -101,6 +103,7 @@
REVLOGV0
REVLOGV1
REVLOGV2
+CHANGELOGV2
FLAG_INLINE_DATA
FLAG_GENERALDELTA
REVLOG_DEFAULT_FLAGS
@@ -199,16 +202,13 @@
def parse_index_v2(data, inline):
# call the C implementation to parse the index data
- index, cache = parsers.parse_index2(data, inline, revlogv2=True)
+ index, cache = parsers.parse_index2(data, inline, format=REVLOGV2)
return index, cache
def parse_index_cl_v2(data, inline):
# call the C implementation to parse the index data
- assert not inline
- from .pure.parsers import parse_index_cl_v2
-
- index, cache = parse_index_cl_v2(data)
+ index, cache = parsers.parse_index2(data, inline, format=CHANGELOGV2)
return index, cache
@@ -741,21 +741,6 @@
"""iterate over all rev in this revlog (from start to stop)"""
return storageutil.iterrevs(len(self), start=start, stop=stop)
- @property
- def nodemap(self):
- msg = (
- b"revlog.nodemap is deprecated, "
- b"use revlog.index.[has_node|rev|get_rev]"
- )
- util.nouideprecwarn(msg, b'5.3', stacklevel=2)
- return self.index.nodemap
-
- @property
- def _nodecache(self):
- msg = b"revlog._nodecache is deprecated, use revlog.index.nodemap"
- util.nouideprecwarn(msg, b'5.3', stacklevel=2)
- return self.index.nodemap
-
def hasnode(self, node):
try:
self.rev(node)
@@ -870,7 +855,23 @@
if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
return self.rawsize(rev)
- return len(self.revision(rev, raw=False))
+ return len(self.revision(rev))
+
+ def fast_rank(self, rev):
+ """Return the rank of a revision if already known, or None otherwise.
+
+ The rank of a revision is the size of the sub-graph it defines as a
+ head. Equivalently, the rank of a revision `r` is the size of the set
+ `ancestors(r)`, `r` included.
+
+ This method returns the rank retrieved from the revlog in constant
+ time. It makes no attempt at computing unknown values for versions of
+ the revlog which do not persist the rank.
+ """
+ rank = self.index[rev][ENTRY_RANK]
+ if rank == RANK_UNKNOWN:
+ return None
+ return rank
def chainbase(self, rev):
base = self._chainbasecache.get(rev)
@@ -1776,33 +1777,13 @@
return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
- def _processflags(self, text, flags, operation, raw=False):
- """deprecated entry point to access flag processors"""
- msg = b'_processflag(...) use the specialized variant'
- util.nouideprecwarn(msg, b'5.2', stacklevel=2)
- if raw:
- return text, flagutil.processflagsraw(self, text, flags)
- elif operation == b'read':
- return flagutil.processflagsread(self, text, flags)
- else: # write operation
- return flagutil.processflagswrite(self, text, flags)
-
- def revision(self, nodeorrev, _df=None, raw=False):
+ def revision(self, nodeorrev, _df=None):
"""return an uncompressed revision of a given node or revision
number.
_df - an existing file handle to read from. (internal-only)
- raw - an optional argument specifying if the revision data is to be
- treated as raw data when applying flag transforms. 'raw' should be set
- to True when generating changegroups or in debug commands.
"""
- if raw:
- msg = (
- b'revlog.revision(..., raw=True) is deprecated, '
- b'use revlog.rawdata(...)'
- )
- util.nouideprecwarn(msg, b'5.2', stacklevel=2)
- return self._revisiondata(nodeorrev, _df, raw=raw)
+ return self._revisiondata(nodeorrev, _df)
def sidedata(self, nodeorrev, _df=None):
"""a map of extra data related to the changeset but not part of the hash
@@ -2479,6 +2460,19 @@
# than ones we manually add.
sidedata_offset = 0
+ rank = RANK_UNKNOWN
+ if self._format_version == CHANGELOGV2:
+ if (p1r, p2r) == (nullrev, nullrev):
+ rank = 1
+ elif p1r != nullrev and p2r == nullrev:
+ rank = 1 + self.fast_rank(p1r)
+ elif p1r == nullrev and p2r != nullrev:
+ rank = 1 + self.fast_rank(p2r)
+ else: # merge node
+ pmin, pmax = sorted((p1r, p2r))
+ rank = 1 + self.fast_rank(pmax)
+ rank += sum(1 for _ in self.findmissingrevs([pmax], [pmin]))
+
e = revlogutils.entry(
flags=flags,
data_offset=offset,
@@ -2493,6 +2487,7 @@
sidedata_offset=sidedata_offset,
sidedata_compressed_length=len(serialized_sidedata),
sidedata_compression_mode=sidedata_compression_mode,
+ rank=rank,
)
self.index.append(e)
--- a/mercurial/revlogutils/__init__.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/revlogutils/__init__.py Fri Feb 18 14:27:43 2022 +0100
@@ -12,6 +12,7 @@
# See mercurial.revlogutils.constants for doc
COMP_MODE_INLINE = 2
+RANK_UNKNOWN = -1
def offset_type(offset, type):
@@ -34,6 +35,7 @@
sidedata_offset=0,
sidedata_compressed_length=0,
sidedata_compression_mode=COMP_MODE_INLINE,
+ rank=RANK_UNKNOWN,
):
"""Build one entry from symbolic name
@@ -56,6 +58,7 @@
sidedata_compressed_length,
data_compression_mode,
sidedata_compression_mode,
+ rank,
)
--- a/mercurial/revlogutils/constants.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/revlogutils/constants.py Fri Feb 18 14:27:43 2022 +0100
@@ -103,6 +103,17 @@
# (see "COMP_MODE_*" constants for details)
ENTRY_SIDEDATA_COMPRESSION_MODE = 11
+# [12] Revision rank:
+#    The number of revisions under this one.
+#
+# Formally this is defined as : rank(X) = len(ancestors(X) + X)
+#
+#    If rank == -1, then we do not have this information available.
+# Only `null` has a rank of 0.
+ENTRY_RANK = 12
+
+RANK_UNKNOWN = -1
+
### main revlog header
# We cannot rely on Struct.format is inconsistent for python <=3.6 versus above
@@ -181,9 +192,20 @@
# 8 bytes: sidedata offset
# 4 bytes: sidedata compressed length
# 1 bytes: compression mode (2 lower bit are data_compression_mode)
-# 27 bytes: Padding to align to 96 bytes (see RevlogV2Plan wiki page)
-INDEX_ENTRY_CL_V2 = struct.Struct(b">Qiiii20s12xQiB27x")
-assert INDEX_ENTRY_CL_V2.size == 32 * 3, INDEX_ENTRY_V2.size
+# 4 bytes: changeset rank (i.e. `len(::REV)`)
+# 23 bytes: Padding to align to 96 bytes (see RevlogV2Plan wiki page)
+INDEX_ENTRY_CL_V2 = struct.Struct(b">Qiiii20s12xQiBi23x")
+assert INDEX_ENTRY_CL_V2.size == 32 * 3, INDEX_ENTRY_CL_V2.size
+INDEX_ENTRY_V2_IDX_OFFSET = 0
+INDEX_ENTRY_V2_IDX_COMPRESSED_LENGTH = 1
+INDEX_ENTRY_V2_IDX_UNCOMPRESSED_LENGTH = 2
+INDEX_ENTRY_V2_IDX_PARENT_1 = 3
+INDEX_ENTRY_V2_IDX_PARENT_2 = 4
+INDEX_ENTRY_V2_IDX_NODEID = 5
+INDEX_ENTRY_V2_IDX_SIDEDATA_OFFSET = 6
+INDEX_ENTRY_V2_IDX_SIDEDATA_COMPRESSED_LENGTH = 7
+INDEX_ENTRY_V2_IDX_COMPRESSION_MODE = 8
+INDEX_ENTRY_V2_IDX_RANK = 9
# revlog index flags
--- a/mercurial/revlogutils/deltas.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/revlogutils/deltas.py Fri Feb 18 14:27:43 2022 +0100
@@ -526,7 +526,7 @@
else:
# deltabase is rawtext before changed by flag processors, which is
# equivalent to non-raw text
- basetext = revlog.revision(baserev, _df=fh, raw=False)
+ basetext = revlog.revision(baserev, _df=fh)
fulltext = mdiff.patch(basetext, delta)
try:
--- a/mercurial/revlogutils/flagutil.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/revlogutils/flagutil.py Fri Feb 18 14:27:43 2022 +0100
@@ -32,6 +32,7 @@
REVIDX_FLAGS_ORDER
REVIDX_RAWTEXT_CHANGING_FLAGS
+# Keep this in sync with REVIDX_KNOWN_FLAGS in rust/hg-core/src/revlog/revlog.rs
REVIDX_KNOWN_FLAGS = util.bitsfrom(REVIDX_FLAGS_ORDER)
# Store flag processors (cf. 'addflagprocessor()' to register)
--- a/mercurial/revlogutils/nodemap.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/revlogutils/nodemap.py Fri Feb 18 14:27:43 2022 +0100
@@ -16,6 +16,7 @@
from .. import (
error,
+ requirements,
util,
)
from . import docket as docket_mod
@@ -34,6 +35,19 @@
pass
+def post_stream_cleanup(repo):
+ """The stream clone might needs to remove some file if persisten nodemap
+ was dropped while stream cloning
+ """
+ if requirements.REVLOGV1_REQUIREMENT not in repo.requirements:
+ return
+ if requirements.NODEMAP_REQUIREMENT in repo.requirements:
+ return
+ unfi = repo.unfiltered()
+ delete_nodemap(None, unfi, unfi.changelog)
+ delete_nodemap(None, repo, unfi.manifestlog._rootstore._revlog)
+
+
def persisted_data(revlog):
"""read the nodemap for a revlog from disk"""
if revlog._nodemap_file is None:
@@ -144,10 +158,12 @@
def delete_nodemap(tr, repo, revlog):
"""Delete nodemap data on disk for a given revlog"""
- if revlog._nodemap_file is None:
- msg = "calling persist nodemap on a revlog without the feature enabled"
- raise error.ProgrammingError(msg)
- repo.svfs.tryunlink(revlog._nodemap_file)
+ prefix = revlog.radix
+ pattern = re.compile(br"(^|/)%s(-[0-9a-f]+\.nd|\.n(\.a)?)$" % prefix)
+ dirpath = revlog.opener.dirname(revlog._indexfile)
+ for f in revlog.opener.listdir(dirpath):
+ if pattern.match(f):
+ repo.svfs.tryunlink(f)
def persist_nodemap(tr, revlog, pending=False, force=False):
--- a/mercurial/revlogutils/revlogv0.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/revlogutils/revlogv0.py Fri Feb 18 14:27:43 2022 +0100
@@ -47,12 +47,6 @@
node_id=sha1nodeconstants.nullid,
)
- @property
- def nodemap(self):
- msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
- util.nouideprecwarn(msg, b'5.3', stacklevel=2)
- return self._nodemap
-
@util.propertycache
def _nodemap(self):
nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: node.nullrev})
--- a/mercurial/scmutil.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/scmutil.py Fri Feb 18 14:27:43 2022 +0100
@@ -180,6 +180,8 @@
)
)
except error.RepoError as inst:
+ if isinstance(inst, error.RepoLookupError):
+ detailed_exit_code = 10
ui.error(_(b"abort: %s\n") % inst)
if inst.hint:
ui.error(_(b"(%s)\n") % inst.hint)
@@ -341,13 +343,13 @@
if fl in self._loweredfiles and f not in self._dirstate:
msg = _(b'possible case-folding collision for %s') % f
if self._abort:
- raise error.Abort(msg)
+ raise error.StateError(msg)
self._ui.warn(_(b"warning: %s\n") % msg)
self._loweredfiles.add(fl)
self._newfiles.add(f)
-def filteredhash(repo, maxrev):
+def filteredhash(repo, maxrev, needobsolete=False):
"""build hash of filtered revisions in the current repoview.
Multiple caches perform up-to-date validation by checking that the
@@ -356,22 +358,31 @@
of revisions in the view may change without the repository tiprev and
tipnode changing.
- This function hashes all the revs filtered from the view and returns
- that SHA-1 digest.
+ This function hashes all the revs filtered from the view (and, optionally,
+ all obsolete revs) up to maxrev and returns that SHA-1 digest.
"""
cl = repo.changelog
- if not cl.filteredrevs:
- return None
- key = cl._filteredrevs_hashcache.get(maxrev)
- if not key:
- revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
+ if needobsolete:
+ obsrevs = obsolete.getrevs(repo, b'obsolete')
+ if not cl.filteredrevs and not obsrevs:
+ return None
+ key = (maxrev, hash(cl.filteredrevs), hash(obsrevs))
+ else:
+ if not cl.filteredrevs:
+ return None
+ key = maxrev
+ obsrevs = frozenset()
+
+ result = cl._filteredrevs_hashcache.get(key)
+ if not result:
+ revs = sorted(r for r in cl.filteredrevs | obsrevs if r <= maxrev)
if revs:
s = hashutil.sha1()
for rev in revs:
s.update(b'%d;' % rev)
- key = s.digest()
- cl._filteredrevs_hashcache[maxrev] = key
- return key
+ result = s.digest()
+ cl._filteredrevs_hashcache[key] = result
+ return result
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
@@ -2195,6 +2206,9 @@
returns a repo object with the required changesets unhidden
"""
+ if not specs:
+ return repo
+
if not repo.filtername or not repo.ui.configbool(
b'experimental', b'directaccess'
):
--- a/mercurial/shelve.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/shelve.py Fri Feb 18 14:27:43 2022 +0100
@@ -1000,7 +1000,11 @@
stats = merge.graft(
repo,
shelvectx,
- labels=[b'working-copy', b'shelve'],
+ labels=[
+ b'working-copy',
+ b'shelved change',
+ b'parent of shelved change',
+ ],
keepconflictparent=True,
)
if stats.unresolvedcount:
--- a/mercurial/simplemerge.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/simplemerge.py Fri Feb 18 14:27:43 2022 +0100
@@ -19,20 +19,14 @@
from __future__ import absolute_import
from .i18n import _
-from .node import nullrev
from . import (
error,
mdiff,
pycompat,
- util,
)
from .utils import stringutil
-class CantReprocessAndShowBase(Exception):
- pass
-
-
def intersect(ra, rb):
"""Given two ranges return the range where they intersect or None.
@@ -89,72 +83,6 @@
self.a = a
self.b = b
- def merge_lines(
- self,
- name_a=None,
- name_b=None,
- name_base=None,
- start_marker=b'<<<<<<<',
- mid_marker=b'=======',
- end_marker=b'>>>>>>>',
- base_marker=None,
- localorother=None,
- minimize=False,
- ):
- """Return merge in cvs-like form."""
- self.conflicts = False
- newline = b'\n'
- if len(self.a) > 0:
- if self.a[0].endswith(b'\r\n'):
- newline = b'\r\n'
- elif self.a[0].endswith(b'\r'):
- newline = b'\r'
- if name_a and start_marker:
- start_marker = start_marker + b' ' + name_a
- if name_b and end_marker:
- end_marker = end_marker + b' ' + name_b
- if name_base and base_marker:
- base_marker = base_marker + b' ' + name_base
- merge_regions = self.merge_regions()
- if minimize:
- merge_regions = self.minimize(merge_regions)
- for t in merge_regions:
- what = t[0]
- if what == b'unchanged':
- for i in range(t[1], t[2]):
- yield self.base[i]
- elif what == b'a' or what == b'same':
- for i in range(t[1], t[2]):
- yield self.a[i]
- elif what == b'b':
- for i in range(t[1], t[2]):
- yield self.b[i]
- elif what == b'conflict':
- if localorother == b'local':
- for i in range(t[3], t[4]):
- yield self.a[i]
- elif localorother == b'other':
- for i in range(t[5], t[6]):
- yield self.b[i]
- else:
- self.conflicts = True
- if start_marker is not None:
- yield start_marker + newline
- for i in range(t[3], t[4]):
- yield self.a[i]
- if base_marker is not None:
- yield base_marker + newline
- for i in range(t[1], t[2]):
- yield self.base[i]
- if mid_marker is not None:
- yield mid_marker + newline
- for i in range(t[5], t[6]):
- yield self.b[i]
- if end_marker is not None:
- yield end_marker + newline
- else:
- raise ValueError(what)
-
def merge_groups(self):
"""Yield sequence of line groups. Each one is a tuple:
@@ -170,7 +98,7 @@
'b', lines
Lines taken from b
- 'conflict', base_lines, a_lines, b_lines
+ 'conflict', (base_lines, a_lines, b_lines)
Lines from base were changed to either a or b and conflict.
"""
for t in self.merge_regions():
@@ -184,9 +112,11 @@
elif what == b'conflict':
yield (
what,
- self.base[t[1] : t[2]],
- self.a[t[3] : t[4]],
- self.b[t[5] : t[6]],
+ (
+ self.base[t[1] : t[2]],
+ self.a[t[3] : t[4]],
+ self.b[t[5] : t[6]],
+ ),
)
else:
raise ValueError(what)
@@ -280,67 +210,6 @@
ia = aend
ib = bend
- def minimize(self, merge_regions):
- """Trim conflict regions of lines where A and B sides match.
-
- Lines where both A and B have made the same changes at the beginning
- or the end of each merge region are eliminated from the conflict
- region and are instead considered the same.
- """
- for region in merge_regions:
- if region[0] != b"conflict":
- yield region
- continue
- # pytype thinks this tuple contains only 3 things, but
- # that's clearly not true because this code successfully
- # executes. It might be wise to rework merge_regions to be
- # some kind of attrs type.
- (
- issue,
- z1,
- z2,
- a1,
- a2,
- b1,
- b2,
- ) = region # pytype: disable=bad-unpacking
- alen = a2 - a1
- blen = b2 - b1
-
- # find matches at the front
- ii = 0
- while (
- ii < alen and ii < blen and self.a[a1 + ii] == self.b[b1 + ii]
- ):
- ii += 1
- startmatches = ii
-
- # find matches at the end
- ii = 0
- while (
- ii < alen
- and ii < blen
- and self.a[a2 - ii - 1] == self.b[b2 - ii - 1]
- ):
- ii += 1
- endmatches = ii
-
- if startmatches > 0:
- yield b'same', a1, a1 + startmatches
-
- yield (
- b'conflict',
- z1,
- z2,
- a1 + startmatches,
- a2 - endmatches,
- b1 + startmatches,
- b2 - endmatches,
- )
-
- if endmatches > 0:
- yield b'same', a2 - endmatches, a2
-
def find_sync_regions(self):
"""Return a list of sync regions, where both descendants match the base.
@@ -403,39 +272,136 @@
return sl
-def _verifytext(text, path, ui, opts):
+def _verifytext(input):
"""verifies that text is non-binary (unless opts[text] is passed,
then we just warn)"""
- if stringutil.binary(text):
- msg = _(b"%s looks like a binary file.") % path
- if not opts.get('quiet'):
- ui.warn(_(b'warning: %s\n') % msg)
- if not opts.get('text'):
- raise error.Abort(msg)
- return text
+ if stringutil.binary(input.text()):
+ msg = _(b"%s looks like a binary file.") % input.fctx.path()
+ raise error.Abort(msg)
+
+
+def _format_labels(*inputs):
+ pad = max(len(input.label) if input.label else 0 for input in inputs)
+ labels = []
+ for input in inputs:
+ if input.label:
+ if input.label_detail:
+ label = (
+ (input.label + b':').ljust(pad + 1)
+ + b' '
+ + input.label_detail
+ )
+ else:
+ label = input.label
+ # 8 for the prefix of conflict marker lines (e.g. '<<<<<<< ')
+ labels.append(stringutil.ellipsis(label, 80 - 8))
+ else:
+ labels.append(None)
+ return labels
+
+
+def _detect_newline(m3):
+ if len(m3.a) > 0:
+ if m3.a[0].endswith(b'\r\n'):
+ return b'\r\n'
+ elif m3.a[0].endswith(b'\r'):
+ return b'\r'
+ return b'\n'
-def _picklabels(defaults, overrides):
- if len(overrides) > 3:
- raise error.Abort(_(b"can only specify three labels."))
- result = defaults[:]
- for i, override in enumerate(overrides):
- result[i] = override
- return result
+def _minimize(a_lines, b_lines):
+ """Trim conflict regions of lines where A and B sides match.
+
+ Lines where both A and B have made the same changes at the beginning
+ or the end of each merge region are eliminated from the conflict
+ region and are instead considered the same.
+ """
+ alen = len(a_lines)
+ blen = len(b_lines)
+
+ # find matches at the front
+ ii = 0
+ while ii < alen and ii < blen and a_lines[ii] == b_lines[ii]:
+ ii += 1
+ startmatches = ii
+
+ # find matches at the end
+ ii = 0
+ while ii < alen and ii < blen and a_lines[-ii - 1] == b_lines[-ii - 1]:
+ ii += 1
+ endmatches = ii
+
+ lines_before = a_lines[:startmatches]
+ new_a_lines = a_lines[startmatches : alen - endmatches]
+ new_b_lines = b_lines[startmatches : blen - endmatches]
+ lines_after = a_lines[alen - endmatches :]
+ return lines_before, new_a_lines, new_b_lines, lines_after
-def is_not_null(ctx):
- if not util.safehasattr(ctx, "node"):
- return False
- return ctx.rev() != nullrev
+def render_minimized(
+ m3,
+ name_a=None,
+ name_b=None,
+ start_marker=b'<<<<<<<',
+ mid_marker=b'=======',
+ end_marker=b'>>>>>>>',
+):
+ """Return merge in cvs-like form."""
+ newline = _detect_newline(m3)
+ conflicts = False
+ if name_a:
+ start_marker = start_marker + b' ' + name_a
+ if name_b:
+ end_marker = end_marker + b' ' + name_b
+ merge_groups = m3.merge_groups()
+ lines = []
+ for what, group_lines in merge_groups:
+ if what == b'conflict':
+ conflicts = True
+ base_lines, a_lines, b_lines = group_lines
+ minimized = _minimize(a_lines, b_lines)
+ lines_before, a_lines, b_lines, lines_after = minimized
+ lines.extend(lines_before)
+ lines.append(start_marker + newline)
+ lines.extend(a_lines)
+ lines.append(mid_marker + newline)
+ lines.extend(b_lines)
+ lines.append(end_marker + newline)
+ lines.extend(lines_after)
+ else:
+ lines.extend(group_lines)
+ return lines, conflicts
-def _mergediff(m3, name_a, name_b, name_base):
+def render_merge3(m3, name_a, name_b, name_base):
+ """Render conflicts as 3-way conflict markers."""
+ newline = _detect_newline(m3)
+ conflicts = False
+ lines = []
+ for what, group_lines in m3.merge_groups():
+ if what == b'conflict':
+ base_lines, a_lines, b_lines = group_lines
+ conflicts = True
+ lines.append(b'<<<<<<< ' + name_a + newline)
+ lines.extend(a_lines)
+ lines.append(b'||||||| ' + name_base + newline)
+ lines.extend(base_lines)
+ lines.append(b'=======' + newline)
+ lines.extend(b_lines)
+ lines.append(b'>>>>>>> ' + name_b + newline)
+ else:
+ lines.extend(group_lines)
+ return lines, conflicts
+
+
+def render_mergediff(m3, name_a, name_b, name_base):
+ """Render conflicts as conflict markers with one snapshot and one diff."""
+ newline = _detect_newline(m3)
lines = []
conflicts = False
- for group in m3.merge_groups():
- if group[0] == b'conflict':
- base_lines, a_lines, b_lines = group[1:]
+ for what, group_lines in m3.merge_groups():
+ if what == b'conflict':
+ base_lines, a_lines, b_lines = group_lines
base_text = b''.join(base_lines)
b_blocks = list(
mdiff.allblocks(
@@ -472,95 +438,95 @@
for line in lines2[block[2] : block[3]]:
yield b'+' + line
- lines.append(b"<<<<<<<\n")
+ lines.append(b"<<<<<<<" + newline)
if matching_lines(a_blocks) < matching_lines(b_blocks):
- lines.append(b"======= %s\n" % name_a)
+ lines.append(b"======= " + name_a + newline)
lines.extend(a_lines)
- lines.append(b"------- %s\n" % name_base)
- lines.append(b"+++++++ %s\n" % name_b)
+ lines.append(b"------- " + name_base + newline)
+ lines.append(b"+++++++ " + name_b + newline)
lines.extend(diff_lines(b_blocks, base_lines, b_lines))
else:
- lines.append(b"------- %s\n" % name_base)
- lines.append(b"+++++++ %s\n" % name_a)
+ lines.append(b"------- " + name_base + newline)
+ lines.append(b"+++++++ " + name_a + newline)
lines.extend(diff_lines(a_blocks, base_lines, a_lines))
- lines.append(b"======= %s\n" % name_b)
+ lines.append(b"======= " + name_b + newline)
lines.extend(b_lines)
- lines.append(b">>>>>>>\n")
+ lines.append(b">>>>>>>" + newline)
conflicts = True
else:
- lines.extend(group[1])
+ lines.extend(group_lines)
return lines, conflicts
-def simplemerge(ui, localctx, basectx, otherctx, **opts):
+def _resolve(m3, sides):
+ lines = []
+ for what, group_lines in m3.merge_groups():
+ if what == b'conflict':
+ for side in sides:
+ lines.extend(group_lines[side])
+ else:
+ lines.extend(group_lines)
+ return lines
+
+
+class MergeInput(object):
+ def __init__(self, fctx, label=None, label_detail=None):
+ self.fctx = fctx
+ self.label = label
+ # If the "detail" part is set, then that is rendered after the label and
+ # separated by a ':'. The label is padded to make the ':' aligned among
+ # all merge inputs.
+ self.label_detail = label_detail
+ self._text = None
+
+ def text(self):
+ if self._text is None:
+ # Merges were always run in the working copy before, which means
+ # they used decoded data, if the user defined any repository
+ # filters.
+ #
+ # Maintain that behavior today for BC, though perhaps in the future
+ # it'd be worth considering whether merging encoded data (what the
+ # repository usually sees) might be more useful.
+ self._text = self.fctx.decodeddata()
+ return self._text
+
+
+def simplemerge(
+ local,
+ base,
+ other,
+ mode=b'merge',
+ allow_binary=False,
+):
"""Performs the simplemerge algorithm.
The merged result is written into `localctx`.
"""
- def readctx(ctx):
- # Merges were always run in the working copy before, which means
- # they used decoded data, if the user defined any repository
- # filters.
- #
- # Maintain that behavior today for BC, though perhaps in the future
- # it'd be worth considering whether merging encoded data (what the
- # repository usually sees) might be more useful.
- return _verifytext(ctx.decodeddata(), ctx.path(), ui, opts)
-
- mode = opts.get('mode', b'merge')
- name_a, name_b, name_base = None, None, None
- if mode != b'union':
- name_a, name_b, name_base = _picklabels(
- [localctx.path(), otherctx.path(), None], opts.get('label', [])
- )
-
- try:
- localtext = readctx(localctx)
- basetext = readctx(basectx)
- othertext = readctx(otherctx)
- except error.Abort:
- return 1
+ if not allow_binary:
+ _verifytext(local)
+ _verifytext(base)
+ _verifytext(other)
- m3 = Merge3Text(basetext, localtext, othertext)
- extrakwargs = {
- b"localorother": opts.get("localorother", None),
- b'minimize': True,
- }
+ m3 = Merge3Text(base.text(), local.text(), other.text())
+ conflicts = False
if mode == b'union':
- extrakwargs[b'start_marker'] = None
- extrakwargs[b'mid_marker'] = None
- extrakwargs[b'end_marker'] = None
- elif name_base is not None:
- extrakwargs[b'base_marker'] = b'|||||||'
- extrakwargs[b'name_base'] = name_base
- extrakwargs[b'minimize'] = False
-
- if mode == b'mergediff':
- lines, conflicts = _mergediff(m3, name_a, name_b, name_base)
+ lines = _resolve(m3, (1, 2))
+ elif mode == b'local':
+ lines = _resolve(m3, (1,))
+ elif mode == b'other':
+ lines = _resolve(m3, (2,))
else:
- lines = list(
- m3.merge_lines(
- name_a=name_a, name_b=name_b, **pycompat.strkwargs(extrakwargs)
- )
- )
- conflicts = m3.conflicts
-
- # merge flags if necessary
- flags = localctx.flags()
- localflags = set(pycompat.iterbytestr(flags))
- otherflags = set(pycompat.iterbytestr(otherctx.flags()))
- if is_not_null(basectx) and localflags != otherflags:
- baseflags = set(pycompat.iterbytestr(basectx.flags()))
- commonflags = localflags & otherflags
- addedflags = (localflags ^ otherflags) - baseflags
- flags = b''.join(sorted(commonflags | addedflags))
+ if mode == b'mergediff':
+ labels = _format_labels(local, other, base)
+ lines, conflicts = render_mergediff(m3, *labels)
+ elif mode == b'merge3':
+ labels = _format_labels(local, other, base)
+ lines, conflicts = render_merge3(m3, *labels)
+ else:
+ labels = _format_labels(local, other)
+ lines, conflicts = render_minimized(m3, *labels)
mergedtext = b''.join(lines)
- if opts.get('print'):
- ui.fout.write(mergedtext)
- else:
- localctx.write(mergedtext, flags)
-
- if conflicts and not mode == b'union':
- return 1
+ return mergedtext, conflicts
--- a/mercurial/sparse.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/sparse.py Fri Feb 18 14:27:43 2022 +0100
@@ -38,63 +38,66 @@
Returns a tuple of includes, excludes, and profiles.
"""
- includes = set()
- excludes = set()
- profiles = set()
- current = None
- havesection = False
+ with util.timedcm(
+ 'sparse.parseconfig(ui, %d bytes, action=%s)', len(raw), action
+ ):
+ includes = set()
+ excludes = set()
+ profiles = set()
+ current = None
+ havesection = False
- for line in raw.split(b'\n'):
- line = line.strip()
- if not line or line.startswith(b'#'):
- # empty or comment line, skip
- continue
- elif line.startswith(b'%include '):
- line = line[9:].strip()
- if line:
- profiles.add(line)
- elif line == b'[include]':
- if havesection and current != includes:
- # TODO pass filename into this API so we can report it.
- raise error.Abort(
- _(
- b'%(action)s config cannot have includes '
- b'after excludes'
+ for line in raw.split(b'\n'):
+ line = line.strip()
+ if not line or line.startswith(b'#'):
+ # empty or comment line, skip
+ continue
+ elif line.startswith(b'%include '):
+ line = line[9:].strip()
+ if line:
+ profiles.add(line)
+ elif line == b'[include]':
+ if havesection and current != includes:
+ # TODO pass filename into this API so we can report it.
+ raise error.Abort(
+ _(
+ b'%(action)s config cannot have includes '
+ b'after excludes'
+ )
+ % {b'action': action}
)
- % {b'action': action}
- )
- havesection = True
- current = includes
- continue
- elif line == b'[exclude]':
- havesection = True
- current = excludes
- elif line:
- if current is None:
- raise error.Abort(
- _(
- b'%(action)s config entry outside of '
- b'section: %(line)s'
+ havesection = True
+ current = includes
+ continue
+ elif line == b'[exclude]':
+ havesection = True
+ current = excludes
+ elif line:
+ if current is None:
+ raise error.Abort(
+ _(
+ b'%(action)s config entry outside of '
+ b'section: %(line)s'
+ )
+ % {b'action': action, b'line': line},
+ hint=_(
+ b'add an [include] or [exclude] line '
+ b'to declare the entry type'
+ ),
)
- % {b'action': action, b'line': line},
- hint=_(
- b'add an [include] or [exclude] line '
- b'to declare the entry type'
- ),
- )
- if line.strip().startswith(b'/'):
- ui.warn(
- _(
- b'warning: %(action)s profile cannot use'
- b' paths starting with /, ignoring %(line)s\n'
+ if line.strip().startswith(b'/'):
+ ui.warn(
+ _(
+ b'warning: %(action)s profile cannot use'
+ b' paths starting with /, ignoring %(line)s\n'
+ )
+ % {b'action': action, b'line': line}
)
- % {b'action': action, b'line': line}
- )
- continue
- current.add(line)
+ continue
+ current.add(line)
- return includes, excludes, profiles
+ return includes, excludes, profiles
# Exists as separate function to facilitate monkeypatching.
@@ -396,7 +399,7 @@
temporaryfiles.append(file)
prunedactions[file] = action
elif branchmerge:
- if type not in mergestatemod.NO_OP_ACTIONS:
+ if not type.no_op:
temporaryfiles.append(file)
prunedactions[file] = action
elif type == mergestatemod.ACTION_FORGET:
@@ -600,38 +603,41 @@
repo, includes, excludes, profiles, force=False, removing=False
):
"""Update the sparse config and working directory state."""
- raw = repo.vfs.tryread(b'sparse')
- oldincludes, oldexcludes, oldprofiles = parseconfig(repo.ui, raw, b'sparse')
-
- oldstatus = repo.status()
- oldmatch = matcher(repo)
- oldrequires = set(repo.requirements)
+ with repo.lock():
+ raw = repo.vfs.tryread(b'sparse')
+ oldincludes, oldexcludes, oldprofiles = parseconfig(
+ repo.ui, raw, b'sparse'
+ )
- # TODO remove this try..except once the matcher integrates better
- # with dirstate. We currently have to write the updated config
- # because that will invalidate the matcher cache and force a
- # re-read. We ideally want to update the cached matcher on the
- # repo instance then flush the new config to disk once wdir is
- # updated. But this requires massive rework to matcher() and its
- # consumers.
+ oldstatus = repo.status()
+ oldmatch = matcher(repo)
+ oldrequires = set(repo.requirements)
+
+ # TODO remove this try..except once the matcher integrates better
+ # with dirstate. We currently have to write the updated config
+ # because that will invalidate the matcher cache and force a
+ # re-read. We ideally want to update the cached matcher on the
+ # repo instance then flush the new config to disk once wdir is
+ # updated. But this requires massive rework to matcher() and its
+ # consumers.
- if requirements.SPARSE_REQUIREMENT in oldrequires and removing:
- repo.requirements.discard(requirements.SPARSE_REQUIREMENT)
- scmutil.writereporequirements(repo)
- elif requirements.SPARSE_REQUIREMENT not in oldrequires:
- repo.requirements.add(requirements.SPARSE_REQUIREMENT)
- scmutil.writereporequirements(repo)
+ if requirements.SPARSE_REQUIREMENT in oldrequires and removing:
+ repo.requirements.discard(requirements.SPARSE_REQUIREMENT)
+ scmutil.writereporequirements(repo)
+ elif requirements.SPARSE_REQUIREMENT not in oldrequires:
+ repo.requirements.add(requirements.SPARSE_REQUIREMENT)
+ scmutil.writereporequirements(repo)
- try:
- writeconfig(repo, includes, excludes, profiles)
- return refreshwdir(repo, oldstatus, oldmatch, force=force)
- except Exception:
- if repo.requirements != oldrequires:
- repo.requirements.clear()
- repo.requirements |= oldrequires
- scmutil.writereporequirements(repo)
- writeconfig(repo, oldincludes, oldexcludes, oldprofiles)
- raise
+ try:
+ writeconfig(repo, includes, excludes, profiles)
+ return refreshwdir(repo, oldstatus, oldmatch, force=force)
+ except Exception:
+ if repo.requirements != oldrequires:
+ repo.requirements.clear()
+ repo.requirements |= oldrequires
+ scmutil.writereporequirements(repo)
+ writeconfig(repo, oldincludes, oldexcludes, oldprofiles)
+ raise
def clearrules(repo, force=False):
@@ -701,21 +707,18 @@
def updateconfig(
repo,
- pats,
opts,
- include=False,
- exclude=False,
+ include=(),
+ exclude=(),
reset=False,
- delete=False,
- enableprofile=False,
- disableprofile=False,
+ delete=(),
+ enableprofile=(),
+ disableprofile=(),
force=False,
usereporootpaths=False,
):
"""Perform a sparse config update.
- Only one of the actions may be performed.
-
The new config is written out and a working directory refresh is performed.
"""
with repo.wlock(), repo.lock(), repo.dirstate.parentchange():
@@ -733,10 +736,13 @@
newexclude = set(oldexclude)
newprofiles = set(oldprofiles)
- if any(os.path.isabs(pat) for pat in pats):
- raise error.Abort(_(b'paths cannot be absolute'))
+ def normalize_pats(pats):
+ if any(os.path.isabs(pat) for pat in pats):
+ raise error.Abort(_(b'paths cannot be absolute'))
- if not usereporootpaths:
+ if usereporootpaths:
+ return pats
+
# let's treat paths as relative to cwd
root, cwd = repo.root, repo.getcwd()
abspats = []
@@ -749,19 +755,20 @@
abspats.append(ap)
else:
abspats.append(kindpat)
- pats = abspats
+ return abspats
- if include:
- newinclude.update(pats)
- elif exclude:
- newexclude.update(pats)
- elif enableprofile:
- newprofiles.update(pats)
- elif disableprofile:
- newprofiles.difference_update(pats)
- elif delete:
- newinclude.difference_update(pats)
- newexclude.difference_update(pats)
+ include = normalize_pats(include)
+ exclude = normalize_pats(exclude)
+ delete = normalize_pats(delete)
+ disableprofile = normalize_pats(disableprofile)
+ enableprofile = normalize_pats(enableprofile)
+
+ newinclude.difference_update(delete)
+ newexclude.difference_update(delete)
+ newprofiles.difference_update(disableprofile)
+ newinclude.update(include)
+ newprofiles.update(enableprofile)
+ newexclude.update(exclude)
profilecount = len(newprofiles - oldprofiles) - len(
oldprofiles - newprofiles
--- a/mercurial/sshpeer.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/sshpeer.py Fri Feb 18 14:27:43 2022 +0100
@@ -16,7 +16,6 @@
error,
pycompat,
util,
- wireprotoserver,
wireprototypes,
wireprotov1peer,
wireprotov1server,
@@ -288,10 +287,6 @@
# Generate a random token to help identify responses to version 2
# upgrade request.
token = pycompat.sysbytes(str(uuid.uuid4()))
- upgradecaps = [
- (b'proto', wireprotoserver.SSHV2),
- ]
- upgradecaps = util.urlreq.urlencode(upgradecaps)
try:
pairsarg = b'%s-%s' % (b'0' * 40, b'0' * 40)
@@ -302,11 +297,6 @@
pairsarg,
]
- # Request upgrade to version 2 if configured.
- if ui.configbool(b'experimental', b'sshpeer.advertise-v2'):
- ui.debug(b'sending upgrade request: %s %s\n' % (token, upgradecaps))
- handshake.insert(0, b'upgrade %s %s\n' % (token, upgradecaps))
-
if requestlog:
ui.debug(b'devel-peer-request: hello+between\n')
ui.debug(b'devel-peer-request: pairs: %d bytes\n' % len(pairsarg))
@@ -365,24 +355,6 @@
if l.startswith(b'capabilities:'):
caps.update(l[:-1].split(b':')[1].split())
break
- elif protoname == wireprotoserver.SSHV2:
- # We see a line with number of bytes to follow and then a value
- # looking like ``capabilities: *``.
- line = stdout.readline()
- try:
- valuelen = int(line)
- except ValueError:
- badresponse()
-
- capsline = stdout.read(valuelen)
- if not capsline.startswith(b'capabilities: '):
- badresponse()
-
- ui.debug(b'remote: %s\n' % capsline)
-
- caps.update(capsline.split(b':')[1].split())
- # Trailing newline.
- stdout.read(1)
# Error if we couldn't find capabilities, this means:
#
@@ -601,14 +573,6 @@
self._readerr()
-class sshv2peer(sshv1peer):
- """A peer that speakers version 2 of the transport protocol."""
-
- # Currently version 2 is identical to version 1 post handshake.
- # And handshake is performed before the peer is instantiated. So
- # we need no custom code.
-
-
def makepeer(ui, path, proc, stdin, stdout, stderr, autoreadstderr=True):
"""Make a peer instance from existing pipes.
@@ -640,17 +604,6 @@
caps,
autoreadstderr=autoreadstderr,
)
- elif protoname == wireprototypes.SSHV2:
- return sshv2peer(
- ui,
- path,
- proc,
- stdin,
- stdout,
- stderr,
- caps,
- autoreadstderr=autoreadstderr,
- )
else:
_cleanuppipes(ui, stdout, stdin, stderr, warn=None)
raise error.RepoError(
--- a/mercurial/sslutil.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/sslutil.py Fri Feb 18 14:27:43 2022 +0100
@@ -139,12 +139,18 @@
alg, fingerprint = fingerprint.split(b':', 1)
fingerprint = fingerprint.replace(b':', b'').lower()
+ # pytype: disable=attribute-error
+ # `s` is heterogeneous, but this entry is always a list of tuples
s[b'certfingerprints'].append((alg, fingerprint))
+ # pytype: enable=attribute-error
# Fingerprints from [hostfingerprints] are always SHA-1.
for fingerprint in ui.configlist(b'hostfingerprints', bhostname):
fingerprint = fingerprint.replace(b':', b'').lower()
+ # pytype: disable=attribute-error
+ # `s` is heterogeneous, but this entry is always a list of tuples
s[b'certfingerprints'].append((b'sha1', fingerprint))
+ # pytype: enable=attribute-error
s[b'legacyfingerprint'] = True
# If a host cert fingerprint is defined, it is the only thing that
--- a/mercurial/statichttprepo.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/statichttprepo.py Fri Feb 18 14:27:43 2022 +0100
@@ -22,6 +22,7 @@
namespaces,
pathutil,
pycompat,
+ requirements as requirementsmod,
url,
util,
vfs as vfsmod,
@@ -197,6 +198,9 @@
# we do not care about empty old-style repositories here
msg = _(b"'%s' does not appear to be an hg repository") % path
raise error.RepoError(msg)
+ if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
+ storevfs = vfsclass(self.vfs.join(b'store'))
+ requirements |= set(storevfs.read(b'requires').splitlines())
supportedrequirements = localrepo.gathersupportedrequirements(ui)
localrepo.ensurerequirementsrecognized(
--- a/mercurial/statprof.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/statprof.py Fri Feb 18 14:27:43 2022 +0100
@@ -494,9 +494,9 @@
data = state
if fp is None:
- import sys
+ from .utils import procutil
- fp = sys.stdout
+ fp = procutil.stdout
if len(data.samples) == 0:
fp.write(b'No samples recorded.\n')
return
@@ -516,7 +516,7 @@
elif format == DisplayFormats.Chrome:
write_to_chrome(data, fp, **kwargs)
else:
- raise Exception(b"Invalid display format")
+ raise Exception("Invalid display format")
if format not in (DisplayFormats.Json, DisplayFormats.Chrome):
fp.write(b'---\n')
@@ -625,7 +625,7 @@
def display_about_method(data, fp, function=None, **kwargs):
if function is None:
- raise Exception(b"Invalid function")
+ raise Exception("Invalid function")
filename = None
if b':' in function:
@@ -1080,7 +1080,7 @@
printusage()
return 0
else:
- assert False, b"unhandled option %s" % o
+ assert False, "unhandled option %s" % o
if not path:
print('must specify --file to load')
--- a/mercurial/streamclone.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/streamclone.py Fri Feb 18 14:27:43 2022 +0100
@@ -27,11 +27,38 @@
store,
util,
)
+from .revlogutils import (
+ nodemap,
+)
from .utils import (
stringutil,
)
+def new_stream_clone_requirements(default_requirements, streamed_requirements):
+ """determine the final set of requirement for a new stream clone
+
+ this method combine the "default" requirements that a new repository would
+ use with the constaint we get from the stream clone content. We keep local
+ configuration choice when possible.
+ """
+ requirements = set(default_requirements)
+ requirements -= requirementsmod.STREAM_FIXED_REQUIREMENTS
+ requirements.update(streamed_requirements)
+ return requirements
+
+
+def streamed_requirements(repo):
+    """the set of requirements the new clone will have to support
+
+ This is used for advertising the stream options and to generate the actual
+ stream content."""
+ requiredformats = (
+ repo.requirements & requirementsmod.STREAM_FIXED_REQUIREMENTS
+ )
+ return requiredformats
+
+
def canperformstreamclone(pullop, bundle2=False):
"""Whether it is possible to perform a streaming clone as part of pull.
@@ -184,17 +211,15 @@
with repo.lock():
consumev1(repo, fp, filecount, bytecount)
-
- # new requirements = old non-format requirements +
- # new format-related remote requirements
- # requirements from the streamed-in repository
- repo.requirements = requirements | (
- repo.requirements - repo.supportedformats
+ repo.requirements = new_stream_clone_requirements(
+ repo.requirements,
+ requirements,
)
repo.svfs.options = localrepo.resolvestorevfsoptions(
repo.ui, repo.requirements, repo.features
)
scmutil.writereporequirements(repo)
+ nodemap.post_stream_cleanup(repo)
if rbranchmap:
repo._branchcaches.replace(repo, rbranchmap)
@@ -333,7 +358,7 @@
if compression != b'UN':
raise ValueError(b'we do not support the compression argument yet')
- requirements = repo.requirements & repo.supportedformats
+ requirements = streamed_requirements(repo)
requires = b','.join(sorted(requirements))
def gen():
@@ -489,6 +514,7 @@
)
consumev1(repo, fp, filecount, bytecount)
+ nodemap.post_stream_cleanup(repo)
class streamcloneapplier(object):
@@ -797,16 +823,15 @@
consumev2(repo, fp, filecount, filesize)
- # new requirements = old non-format requirements +
- # new format-related remote requirements
- # requirements from the streamed-in repository
- repo.requirements = set(requirements) | (
- repo.requirements - repo.supportedformats
+ repo.requirements = new_stream_clone_requirements(
+ repo.requirements,
+ requirements,
)
repo.svfs.options = localrepo.resolvestorevfsoptions(
repo.ui, repo.requirements, repo.features
)
scmutil.writereporequirements(repo)
+ nodemap.post_stream_cleanup(repo)
def _copy_files(src_vfs_map, dst_vfs_map, entries, progress):
--- a/mercurial/templatekw.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/templatekw.py Fri Feb 18 14:27:43 2022 +0100
@@ -304,6 +304,21 @@
)
+@templatekeyword(b'_fast_rank', requires={b'ctx'})
+def fast_rank(context, mapping):
+ """the rank of a changeset if cached
+
+ The rank of a revision is the size of the sub-graph it defines as a head.
+ Equivalently, the rank of a revision `r` is the size of the set
+ `ancestors(r)`, `r` included.
+ """
+ ctx = context.resource(mapping, b'ctx')
+ rank = ctx.fast_rank()
+ if rank is None:
+ return None
+ return b"%d" % rank
+
+
def _getfilestatus(context, mapping, listall=False):
ctx = context.resource(mapping, b'ctx')
revcache = context.resource(mapping, b'revcache')
--- a/mercurial/transaction.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/transaction.py Fri Feb 18 14:27:43 2022 +0100
@@ -25,11 +25,6 @@
version = 2
-# These are the file generators that should only be executed after the
-# finalizers are done, since they rely on the output of the finalizers (like
-# the changelog having been written).
-postfinalizegenerators = {b'bookmarks', b'dirstate'}
-
GEN_GROUP_ALL = b'all'
GEN_GROUP_PRE_FINALIZE = b'prefinalize'
GEN_GROUP_POST_FINALIZE = b'postfinalize'
@@ -334,7 +329,13 @@
@active
def addfilegenerator(
- self, genid, filenames, genfunc, order=0, location=b''
+ self,
+ genid,
+ filenames,
+ genfunc,
+ order=0,
+ location=b'',
+ post_finalize=False,
):
"""add a function to generates some files at transaction commit
@@ -357,10 +358,14 @@
The `location` arguments may be used to indicate the files are located
outside of the the standard directory for transaction. It should match
one of the key of the `transaction.vfsmap` dictionary.
+
+ The `post_finalize` argument can be set to `True` for file generation
+ that must be run after the transaction has been finalized.
"""
# For now, we are unable to do proper backup and restore of custom vfs
# but for bookmarks that are handled outside this mechanism.
- self._filegenerators[genid] = (order, filenames, genfunc, location)
+ entry = (order, filenames, genfunc, location, post_finalize)
+ self._filegenerators[genid] = entry
@active
def removefilegenerator(self, genid):
@@ -380,13 +385,12 @@
for id, entry in sorted(pycompat.iteritems(self._filegenerators)):
any = True
- order, filenames, genfunc, location = entry
+ order, filenames, genfunc, location, post_finalize = entry
# for generation at closing, check if it's before or after finalize
- is_post = id in postfinalizegenerators
- if skip_post and is_post:
+ if skip_post and post_finalize:
continue
- elif skip_pre and not is_post:
+ elif skip_pre and not post_finalize:
continue
vfs = self._vfsmap[location]
--- a/mercurial/unionrepo.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/unionrepo.py Fri Feb 18 14:27:43 2022 +0100
@@ -71,6 +71,7 @@
_sds,
_dcm,
_sdcm,
+ rank,
) = rev
flags = _start & 0xFFFF
@@ -107,6 +108,7 @@
0, # sidedata size
revlog_constants.COMP_MODE_INLINE,
revlog_constants.COMP_MODE_INLINE,
+ rank,
)
self.index.append(e)
self.bundlerevs.add(n)
--- a/mercurial/upgrade.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/upgrade.py Fri Feb 18 14:27:43 2022 +0100
@@ -42,27 +42,16 @@
):
"""Upgrade a repository in place."""
if optimize is None:
- optimize = {}
+ optimize = set()
repo = repo.unfiltered()
- revlogs = set(upgrade_engine.UPGRADE_ALL_REVLOGS)
- specentries = (
- (upgrade_engine.UPGRADE_CHANGELOG, changelog),
- (upgrade_engine.UPGRADE_MANIFEST, manifest),
- (upgrade_engine.UPGRADE_FILELOGS, filelogs),
- )
- specified = [(y, x) for (y, x) in specentries if x is not None]
- if specified:
- # we have some limitation on revlogs to be recloned
- if any(x for y, x in specified):
- revlogs = set()
- for upgrade, enabled in specified:
- if enabled:
- revlogs.add(upgrade)
- else:
- # none are enabled
- for upgrade, __ in specified:
- revlogs.discard(upgrade)
+ specified_revlogs = {}
+ if changelog is not None:
+ specified_revlogs[upgrade_engine.UPGRADE_CHANGELOG] = changelog
+ if manifest is not None:
+ specified_revlogs[upgrade_engine.UPGRADE_MANIFEST] = manifest
+ if filelogs is not None:
+ specified_revlogs[upgrade_engine.UPGRADE_FILELOGS] = filelogs
# Ensure the repository can be upgraded.
upgrade_actions.check_source_requirements(repo)
@@ -96,20 +85,92 @@
)
removed_actions = upgrade_actions.find_format_downgrades(repo)
- removedreqs = repo.requirements - newreqs
- addedreqs = newreqs - repo.requirements
+ # check if we need to touch revlog and if so, which ones
+
+ touched_revlogs = set()
+ overwrite_msg = _(b'warning: ignoring %14s, as upgrade is changing: %s\n')
+ select_msg = _(b'note: selecting %s for processing to change: %s\n')
+ msg_issued = 0
+
+ FL = upgrade_engine.UPGRADE_FILELOGS
+ MN = upgrade_engine.UPGRADE_MANIFEST
+ CL = upgrade_engine.UPGRADE_CHANGELOG
+
+ if optimizations:
+ if any(specified_revlogs.values()):
+ # we have some limitation on revlogs to be recloned
+ for rl, enabled in specified_revlogs.items():
+ if enabled:
+ touched_revlogs.add(rl)
+ else:
+ touched_revlogs = set(upgrade_engine.UPGRADE_ALL_REVLOGS)
+ for rl, enabled in specified_revlogs.items():
+ if not enabled:
+ touched_revlogs.discard(rl)
+
+ if repo.shared():
+ unsafe_actions = set()
+ unsafe_actions.update(up_actions)
+ unsafe_actions.update(removed_actions)
+ unsafe_actions.update(optimizations)
+ unsafe_actions = [
+ a for a in unsafe_actions if not a.compatible_with_share
+ ]
+ unsafe_actions.sort(key=lambda a: a.name)
+ if unsafe_actions:
+ m = _(b'cannot use these actions on a share repository: %s')
+ h = _(b'upgrade the main repository directly')
+ actions = b', '.join(a.name for a in unsafe_actions)
+ m %= actions
+ raise error.Abort(m, hint=h)
- if revlogs != upgrade_engine.UPGRADE_ALL_REVLOGS:
- incompatible = upgrade_actions.RECLONES_REQUIREMENTS & (
- removedreqs | addedreqs
- )
- if incompatible:
- msg = _(
- b'ignoring revlogs selection flags, format requirements '
- b'change: %s\n'
- )
- ui.warn(msg % b', '.join(sorted(incompatible)))
- revlogs = upgrade_engine.UPGRADE_ALL_REVLOGS
+ for action in sorted(up_actions + removed_actions, key=lambda a: a.name):
+        # optimisations do not "require" anything, they just need it
+ if action.type != upgrade_actions.FORMAT_VARIANT:
+ continue
+
+ if action.touches_filelogs and FL not in touched_revlogs:
+ if FL in specified_revlogs:
+ if not specified_revlogs[FL]:
+ msg = overwrite_msg % (b'--no-filelogs', action.name)
+ ui.warn(msg)
+ msg_issued = 2
+ else:
+ msg = select_msg % (b'all-filelogs', action.name)
+ ui.status(msg)
+ if not ui.quiet:
+ msg_issued = 1
+ touched_revlogs.add(FL)
+
+ if action.touches_manifests and MN not in touched_revlogs:
+ if MN in specified_revlogs:
+ if not specified_revlogs[MN]:
+ msg = overwrite_msg % (b'--no-manifest', action.name)
+ ui.warn(msg)
+ msg_issued = 2
+ else:
+ msg = select_msg % (b'all-manifestlogs', action.name)
+ ui.status(msg)
+ if not ui.quiet:
+ msg_issued = 1
+ touched_revlogs.add(MN)
+
+ if action.touches_changelog and CL not in touched_revlogs:
+ if CL in specified_revlogs:
+ if not specified_revlogs[CL]:
+ msg = overwrite_msg % (b'--no-changelog', action.name)
+ ui.warn(msg)
+ msg_issued = True
+ else:
+ msg = select_msg % (b'changelog', action.name)
+ ui.status(msg)
+ if not ui.quiet:
+ msg_issued = 1
+ touched_revlogs.add(CL)
+ if msg_issued >= 2:
+ ui.warn((b"\n"))
+ elif msg_issued >= 1:
+ ui.status((b"\n"))
upgrade_op = upgrade_actions.UpgradeOperation(
ui,
@@ -117,7 +178,7 @@
repo.requirements,
up_actions,
removed_actions,
- revlogs,
+ touched_revlogs,
backup,
)
--- a/mercurial/upgrade_utils/actions.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/upgrade_utils/actions.py Fri Feb 18 14:27:43 2022 +0100
@@ -36,7 +36,10 @@
def preservedrequirements(repo):
- return set()
+ preserved = {
+ requirements.SHARED_REQUIREMENT,
+ }
+ return preserved & repo.requirements
FORMAT_VARIANT = b'deficiency'
@@ -97,6 +100,9 @@
# Whether this improvement touches the dirstate
touches_dirstate = False
+    # Can this action be run on a share instead of its main repository
+ compatible_with_share = False
+
allformatvariant = [] # type: List[Type['formatvariant']]
@@ -190,6 +196,30 @@
touches_changelog = False
touches_requirements = True
touches_dirstate = True
+ compatible_with_share = True
+
+
+@registerformatvariant
+class dirstatetrackedkey(requirementformatvariant):
+ name = b'tracked-hint'
+ _requirement = requirements.DIRSTATE_TRACKED_HINT_V1
+
+ default = False
+
+ description = _(
+ b'Add a small file to help external tooling that watch the tracked set'
+ )
+
+ upgrademessage = _(
+ b'external tools will be informated of potential change in the tracked set'
+ )
+
+ touches_filelogs = False
+ touches_manifests = False
+ touches_changelog = False
+ touches_requirements = True
+ touches_dirstate = True
+ compatible_with_share = True
@registerformatvariant
@@ -243,7 +273,7 @@
name = b'share-safe'
_requirement = requirements.SHARESAFE_REQUIREMENT
- default = False
+ default = True
description = _(
b'old shared repositories do not share source repository '
@@ -899,8 +929,6 @@
# This was a precursor to generaldelta and was never enabled by default.
# It should (hopefully) not exist in the wild.
b'parentdelta',
- # Upgrade should operate on the actual store, not the shared link.
- requirements.SHARED_REQUIREMENT,
}
@@ -932,6 +960,16 @@
m = _(b'cannot upgrade repository; unsupported source requirement: %s')
blockingreqs = b', '.join(sorted(blockingreqs))
raise error.Abort(m % blockingreqs)
+ # Upgrade should operate on the actual store, not the shared link.
+
+ bad_share = (
+ requirements.SHARED_REQUIREMENT in repo.requirements
+ and requirements.SHARESAFE_REQUIREMENT not in repo.requirements
+ )
+ if bad_share:
+ m = _(b'cannot upgrade repository; share repository without share-safe')
+ h = _(b'check :hg:`help config.format.use-share-safe`')
+ raise error.Abort(m, hint=h)
### Verify the validity of the planned requirement changes ####################
@@ -952,6 +990,7 @@
requirements.REVLOGV2_REQUIREMENT,
requirements.CHANGELOGV2_REQUIREMENT,
requirements.REVLOGV1_REQUIREMENT,
+ requirements.DIRSTATE_TRACKED_HINT_V1,
requirements.DIRSTATE_V2_REQUIREMENT,
}
for name in compression.compengines:
@@ -972,18 +1011,20 @@
Extensions should monkeypatch this to add their custom requirements.
"""
supported = {
+ requirements.CHANGELOGV2_REQUIREMENT,
+ requirements.COPIESSDC_REQUIREMENT,
+ requirements.DIRSTATE_TRACKED_HINT_V1,
+ requirements.DIRSTATE_V2_REQUIREMENT,
requirements.DOTENCODE_REQUIREMENT,
requirements.FNCACHE_REQUIREMENT,
requirements.GENERALDELTA_REQUIREMENT,
+ requirements.NODEMAP_REQUIREMENT,
requirements.REVLOGV1_REQUIREMENT, # allowed in case of downgrade
- requirements.STORE_REQUIREMENT,
+ requirements.REVLOGV2_REQUIREMENT,
+ requirements.SHARED_REQUIREMENT,
+ requirements.SHARESAFE_REQUIREMENT,
requirements.SPARSEREVLOG_REQUIREMENT,
- requirements.COPIESSDC_REQUIREMENT,
- requirements.NODEMAP_REQUIREMENT,
- requirements.SHARESAFE_REQUIREMENT,
- requirements.REVLOGV2_REQUIREMENT,
- requirements.CHANGELOGV2_REQUIREMENT,
- requirements.DIRSTATE_V2_REQUIREMENT,
+ requirements.STORE_REQUIREMENT,
}
for name in compression.compengines:
engine = compression.compengines[name]
@@ -1015,6 +1056,7 @@
requirements.REVLOGV1_REQUIREMENT,
requirements.REVLOGV2_REQUIREMENT,
requirements.CHANGELOGV2_REQUIREMENT,
+ requirements.DIRSTATE_TRACKED_HINT_V1,
requirements.DIRSTATE_V2_REQUIREMENT,
}
for name in compression.compengines:
--- a/mercurial/upgrade_utils/engine.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/upgrade_utils/engine.py Fri Feb 18 14:27:43 2022 +0100
@@ -486,6 +486,15 @@
upgrade_dirstate(ui, srcrepo, upgrade_op, b'v2', b'v1')
upgrade_op.removed_actions.remove(upgrade_actions.dirstatev2)
+ if upgrade_actions.dirstatetrackedkey in upgrade_op.upgrade_actions:
+ ui.status(_(b'create dirstate-tracked-hint file\n'))
+ upgrade_tracked_hint(ui, srcrepo, upgrade_op, add=True)
+ upgrade_op.upgrade_actions.remove(upgrade_actions.dirstatetrackedkey)
+ elif upgrade_actions.dirstatetrackedkey in upgrade_op.removed_actions:
+ ui.status(_(b'remove dirstate-tracked-hint file\n'))
+ upgrade_tracked_hint(ui, srcrepo, upgrade_op, add=False)
+ upgrade_op.removed_actions.remove(upgrade_actions.dirstatetrackedkey)
+
if not (upgrade_op.upgrade_actions or upgrade_op.removed_actions):
return
@@ -660,3 +669,15 @@
srcrepo.dirstate.write(None)
scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)
+
+
+def upgrade_tracked_hint(ui, srcrepo, upgrade_op, add):
+ if add:
+ srcrepo.dirstate._use_tracked_hint = True
+ srcrepo.dirstate._dirty = True
+ srcrepo.dirstate._dirty_tracked_set = True
+ srcrepo.dirstate.write(None)
+ if not add:
+ srcrepo.dirstate.delete_tracked_hint()
+
+ scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)
--- a/mercurial/util.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/util.py Fri Feb 18 14:27:43 2022 +0100
@@ -57,7 +57,6 @@
hashutil,
procutil,
stringutil,
- urlutil,
)
if pycompat.TYPE_CHECKING:
@@ -2991,54 +2990,6 @@
return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
-def getport(*args, **kwargs):
- msg = b'getport(...) moved to mercurial.utils.urlutil'
- nouideprecwarn(msg, b'6.0', stacklevel=2)
- return urlutil.getport(*args, **kwargs)
-
-
-def url(*args, **kwargs):
- msg = b'url(...) moved to mercurial.utils.urlutil'
- nouideprecwarn(msg, b'6.0', stacklevel=2)
- return urlutil.url(*args, **kwargs)
-
-
-def hasscheme(*args, **kwargs):
- msg = b'hasscheme(...) moved to mercurial.utils.urlutil'
- nouideprecwarn(msg, b'6.0', stacklevel=2)
- return urlutil.hasscheme(*args, **kwargs)
-
-
-def hasdriveletter(*args, **kwargs):
- msg = b'hasdriveletter(...) moved to mercurial.utils.urlutil'
- nouideprecwarn(msg, b'6.0', stacklevel=2)
- return urlutil.hasdriveletter(*args, **kwargs)
-
-
-def urllocalpath(*args, **kwargs):
- msg = b'urllocalpath(...) moved to mercurial.utils.urlutil'
- nouideprecwarn(msg, b'6.0', stacklevel=2)
- return urlutil.urllocalpath(*args, **kwargs)
-
-
-def checksafessh(*args, **kwargs):
- msg = b'checksafessh(...) moved to mercurial.utils.urlutil'
- nouideprecwarn(msg, b'6.0', stacklevel=2)
- return urlutil.checksafessh(*args, **kwargs)
-
-
-def hidepassword(*args, **kwargs):
- msg = b'hidepassword(...) moved to mercurial.utils.urlutil'
- nouideprecwarn(msg, b'6.0', stacklevel=2)
- return urlutil.hidepassword(*args, **kwargs)
-
-
-def removeauth(*args, **kwargs):
- msg = b'removeauth(...) moved to mercurial.utils.urlutil'
- nouideprecwarn(msg, b'6.0', stacklevel=2)
- return urlutil.removeauth(*args, **kwargs)
-
-
timecount = unitcountfn(
(1, 1e3, _(b'%.0f s')),
(100, 1, _(b'%.1f s')),
--- a/mercurial/utils/procutil.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/utils/procutil.py Fri Feb 18 14:27:43 2022 +0100
@@ -75,7 +75,9 @@
return res
+# pytype: disable=attribute-error
io.BufferedIOBase.register(LineBufferedWrapper)
+# pytype: enable=attribute-error
def make_line_buffered(stream):
@@ -114,7 +116,9 @@
return total_written
+# pytype: disable=attribute-error
io.IOBase.register(WriteAllWrapper)
+# pytype: enable=attribute-error
def _make_write_all(stream):
@@ -738,6 +742,8 @@
start_new_session = False
ensurestart = True
+ stdin = None
+
try:
if stdin_bytes is None:
stdin = subprocess.DEVNULL
@@ -766,7 +772,8 @@
record_wait(255)
raise
finally:
- if stdin_bytes is not None:
+ if stdin_bytes is not None and stdin is not None:
+ assert not isinstance(stdin, int)
stdin.close()
if not ensurestart:
# Even though we're not waiting on the child process,
@@ -847,6 +854,8 @@
return
returncode = 255
+ stdin = None
+
try:
if record_wait is None:
# Start a new session
@@ -889,7 +898,8 @@
finally:
# mission accomplished, this child needs to exit and not
# continue the hg process here.
- stdin.close()
+ if stdin is not None:
+ stdin.close()
if record_wait is None:
os._exit(returncode)
--- a/mercurial/utils/storageutil.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/utils/storageutil.py Fri Feb 18 14:27:43 2022 +0100
@@ -112,6 +112,13 @@
2-tuple of the source filename and node.
"""
if store.parents(node)[0] != sha1nodeconstants.nullid:
+ # When creating a copy or move we set filelog parents to null,
+ # because contents are probably unrelated and making a delta
+ # would not be useful.
+ # Conversely, if filelog p1 is non-null we know
+ # there is no copy metadata.
+ # In the presence of merges, this reasoning becomes invalid
+ # if we reorder parents. See tests/test-issue6528.t.
return False
meta = parsemeta(store.revision(node))[0]
--- a/mercurial/utils/stringutil.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/utils/stringutil.py Fri Feb 18 14:27:43 2022 +0100
@@ -264,7 +264,11 @@
q1 = rs.find(b'<', p1 + 1)
if q1 < 0:
q1 = len(rs)
+ # pytype: disable=wrong-arg-count
+ # TODO: figure out why pytype doesn't recognize the optional start
+ # arg
elif q1 > p1 + 1 and rs.startswith(b'=', q1 - 1):
+ # pytype: enable=wrong-arg-count
# backtrack for ' field=<'
q0 = rs.rfind(b' ', p1 + 1, q1 - 1)
if q0 < 0:
@@ -692,11 +696,11 @@
s = bytes(s)
# call underlying function of s.encode('string_escape') directly for
# Python 3 compatibility
- return codecs.escape_encode(s)[0]
+ return codecs.escape_encode(s)[0] # pytype: disable=module-attr
def unescapestr(s):
- return codecs.escape_decode(s)[0]
+ return codecs.escape_decode(s)[0] # pytype: disable=module-attr
def forcebytestr(obj):
--- a/mercurial/wireprotoserver.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/wireprotoserver.py Fri Feb 18 14:27:43 2022 +0100
@@ -18,11 +18,9 @@
util,
wireprototypes,
wireprotov1server,
- wireprotov2server,
)
from .interfaces import util as interfaceutil
from .utils import (
- cborutil,
compression,
stringutil,
)
@@ -39,7 +37,6 @@
HGERRTYPE = b'application/hg-error'
SSHV1 = wireprototypes.SSHV1
-SSHV2 = wireprototypes.SSHV2
def decodevaluefromheaders(req, headerprefix):
@@ -244,97 +241,6 @@
return True
-def _availableapis(repo):
- apis = set()
-
- # Registered APIs are made available via config options of the name of
- # the protocol.
- for k, v in API_HANDLERS.items():
- section, option = v[b'config']
- if repo.ui.configbool(section, option):
- apis.add(k)
-
- return apis
-
-
-def handlewsgiapirequest(rctx, req, res, checkperm):
- """Handle requests to /api/*."""
- assert req.dispatchparts[0] == b'api'
-
- repo = rctx.repo
-
- # This whole URL space is experimental for now. But we want to
- # reserve the URL space. So, 404 all URLs if the feature isn't enabled.
- if not repo.ui.configbool(b'experimental', b'web.apiserver'):
- res.status = b'404 Not Found'
- res.headers[b'Content-Type'] = b'text/plain'
- res.setbodybytes(_(b'Experimental API server endpoint not enabled'))
- return
-
- # The URL space is /api/<protocol>/*. The structure of URLs under varies
- # by <protocol>.
-
- availableapis = _availableapis(repo)
-
- # Requests to /api/ list available APIs.
- if req.dispatchparts == [b'api']:
- res.status = b'200 OK'
- res.headers[b'Content-Type'] = b'text/plain'
- lines = [
- _(
- b'APIs can be accessed at /api/<name>, where <name> can be '
- b'one of the following:\n'
- )
- ]
- if availableapis:
- lines.extend(sorted(availableapis))
- else:
- lines.append(_(b'(no available APIs)\n'))
- res.setbodybytes(b'\n'.join(lines))
- return
-
- proto = req.dispatchparts[1]
-
- if proto not in API_HANDLERS:
- res.status = b'404 Not Found'
- res.headers[b'Content-Type'] = b'text/plain'
- res.setbodybytes(
- _(b'Unknown API: %s\nKnown APIs: %s')
- % (proto, b', '.join(sorted(availableapis)))
- )
- return
-
- if proto not in availableapis:
- res.status = b'404 Not Found'
- res.headers[b'Content-Type'] = b'text/plain'
- res.setbodybytes(_(b'API %s not enabled\n') % proto)
- return
-
- API_HANDLERS[proto][b'handler'](
- rctx, req, res, checkperm, req.dispatchparts[2:]
- )
-
-
-# Maps API name to metadata so custom API can be registered.
-# Keys are:
-#
-# config
-# Config option that controls whether service is enabled.
-# handler
-# Callable receiving (rctx, req, res, checkperm, urlparts) that is called
-# when a request to this API is received.
-# apidescriptor
-# Callable receiving (req, repo) that is called to obtain an API
-# descriptor for this service. The response must be serializable to CBOR.
-API_HANDLERS = {
- wireprotov2server.HTTP_WIREPROTO_V2: {
- b'config': (b'experimental', b'web.api.http-v2'),
- b'handler': wireprotov2server.handlehttpv2request,
- b'apidescriptor': wireprotov2server.httpv2apidescriptor,
- },
-}
-
-
def _httpresponsetype(ui, proto, prefer_uncompressed):
"""Determine the appropriate response type and compression settings.
@@ -371,55 +277,6 @@
return HGTYPE, util.compengines[b'zlib'], opts
-def processcapabilitieshandshake(repo, req, res, proto):
- """Called during a ?cmd=capabilities request.
-
- If the client is advertising support for a newer protocol, we send
- a CBOR response with information about available services. If no
- advertised services are available, we don't handle the request.
- """
- # Fall back to old behavior unless the API server is enabled.
- if not repo.ui.configbool(b'experimental', b'web.apiserver'):
- return False
-
- clientapis = decodevaluefromheaders(req, b'X-HgUpgrade')
- protocaps = decodevaluefromheaders(req, b'X-HgProto')
- if not clientapis or not protocaps:
- return False
-
- # We currently only support CBOR responses.
- protocaps = set(protocaps.split(b' '))
- if b'cbor' not in protocaps:
- return False
-
- descriptors = {}
-
- for api in sorted(set(clientapis.split()) & _availableapis(repo)):
- handler = API_HANDLERS[api]
-
- descriptorfn = handler.get(b'apidescriptor')
- if not descriptorfn:
- continue
-
- descriptors[api] = descriptorfn(req, repo)
-
- v1caps = wireprotov1server.dispatch(repo, proto, b'capabilities')
- assert isinstance(v1caps, wireprototypes.bytesresponse)
-
- m = {
- # TODO allow this to be configurable.
- b'apibase': b'api/',
- b'apis': descriptors,
- b'v1capabilities': v1caps.data,
- }
-
- res.status = b'200 OK'
- res.headers[b'Content-Type'] = b'application/mercurial-cbor'
- res.setbodybytes(b''.join(cborutil.streamencode(m)))
-
- return True
-
-
def _callhttp(repo, req, res, proto, cmd):
# Avoid cycle involving hg module.
from .hgweb import common as hgwebcommon
@@ -461,13 +318,6 @@
proto.checkperm(wireprotov1server.commands[cmd].permission)
- # Possibly handle a modern client wanting to switch protocols.
- if cmd == b'capabilities' and processcapabilitieshandshake(
- repo, req, res, proto
- ):
-
- return
-
rsp = wireprotov1server.dispatch(repo, proto, cmd)
if isinstance(rsp, bytes):
@@ -596,17 +446,6 @@
pass
-class sshv2protocolhandler(sshv1protocolhandler):
- """Protocol handler for version 2 of the SSH protocol."""
-
- @property
- def name(self):
- return wireprototypes.SSHV2
-
- def addcapabilities(self, repo, caps):
- return caps
-
-
def _runsshserver(ui, repo, fin, fout, ev):
# This function operates like a state machine of sorts. The following
# states are defined:
@@ -616,19 +455,6 @@
# new lines. These commands are processed in this state, one command
# after the other.
#
- # protov2-serving
- # Server is in protocol version 2 serving mode.
- #
- # upgrade-initial
- # The server is going to process an upgrade request.
- #
- # upgrade-v2-filter-legacy-handshake
- # The protocol is being upgraded to version 2. The server is expecting
- # the legacy handshake from version 1.
- #
- # upgrade-v2-finish
- # The upgrade to version 2 of the protocol is imminent.
- #
# shutdown
# The server is shutting down, possibly in reaction to a client event.
#
@@ -637,32 +463,9 @@
# protov1-serving -> shutdown
# When server receives an empty request or encounters another
# error.
- #
- # protov1-serving -> upgrade-initial
- # An upgrade request line was seen.
- #
- # upgrade-initial -> upgrade-v2-filter-legacy-handshake
- # Upgrade to version 2 in progress. Server is expecting to
- # process a legacy handshake.
- #
- # upgrade-v2-filter-legacy-handshake -> shutdown
- # Client did not fulfill upgrade handshake requirements.
- #
- # upgrade-v2-filter-legacy-handshake -> upgrade-v2-finish
- # Client fulfilled version 2 upgrade requirements. Finishing that
- # upgrade.
- #
- # upgrade-v2-finish -> protov2-serving
- # Protocol upgrade to version 2 complete. Server can now speak protocol
- # version 2.
- #
- # protov2-serving -> protov1-serving
- # Ths happens by default since protocol version 2 is the same as
- # version 1 except for the handshake.
state = b'protov1-serving'
proto = sshv1protocolhandler(ui, fin, fout)
- protoswitched = False
while not ev.is_set():
if state == b'protov1-serving':
@@ -674,21 +477,6 @@
state = b'shutdown'
continue
- # It looks like a protocol upgrade request. Transition state to
- # handle it.
- if request.startswith(b'upgrade '):
- if protoswitched:
- _sshv1respondooberror(
- fout,
- ui.ferr,
- b'cannot upgrade protocols multiple times',
- )
- state = b'shutdown'
- continue
-
- state = b'upgrade-initial'
- continue
-
available = wireprotov1server.commands.commandavailable(
request, proto
)
@@ -724,108 +512,6 @@
b'wire protocol command: %s' % rsp
)
- # For now, protocol version 2 serving just goes back to version 1.
- elif state == b'protov2-serving':
- state = b'protov1-serving'
- continue
-
- elif state == b'upgrade-initial':
- # We should never transition into this state if we've switched
- # protocols.
- assert not protoswitched
- assert proto.name == wireprototypes.SSHV1
-
- # Expected: upgrade <token> <capabilities>
- # If we get something else, the request is malformed. It could be
- # from a future client that has altered the upgrade line content.
- # We treat this as an unknown command.
- try:
- token, caps = request.split(b' ')[1:]
- except ValueError:
- _sshv1respondbytes(fout, b'')
- state = b'protov1-serving'
- continue
-
- # Send empty response if we don't support upgrading protocols.
- if not ui.configbool(b'experimental', b'sshserver.support-v2'):
- _sshv1respondbytes(fout, b'')
- state = b'protov1-serving'
- continue
-
- try:
- caps = urlreq.parseqs(caps)
- except ValueError:
- _sshv1respondbytes(fout, b'')
- state = b'protov1-serving'
- continue
-
- # We don't see an upgrade request to protocol version 2. Ignore
- # the upgrade request.
- wantedprotos = caps.get(b'proto', [b''])[0]
- if SSHV2 not in wantedprotos:
- _sshv1respondbytes(fout, b'')
- state = b'protov1-serving'
- continue
-
- # It looks like we can honor this upgrade request to protocol 2.
- # Filter the rest of the handshake protocol request lines.
- state = b'upgrade-v2-filter-legacy-handshake'
- continue
-
- elif state == b'upgrade-v2-filter-legacy-handshake':
- # Client should have sent legacy handshake after an ``upgrade``
- # request. Expected lines:
- #
- # hello
- # between
- # pairs 81
- # 0000...-0000...
-
- ok = True
- for line in (b'hello', b'between', b'pairs 81'):
- request = fin.readline()[:-1]
-
- if request != line:
- _sshv1respondooberror(
- fout,
- ui.ferr,
- b'malformed handshake protocol: missing %s' % line,
- )
- ok = False
- state = b'shutdown'
- break
-
- if not ok:
- continue
-
- request = fin.read(81)
- if request != b'%s-%s' % (b'0' * 40, b'0' * 40):
- _sshv1respondooberror(
- fout,
- ui.ferr,
- b'malformed handshake protocol: '
- b'missing between argument value',
- )
- state = b'shutdown'
- continue
-
- state = b'upgrade-v2-finish'
- continue
-
- elif state == b'upgrade-v2-finish':
- # Send the upgrade response.
- fout.write(b'upgraded %s %s\n' % (token, SSHV2))
- servercaps = wireprotov1server.capabilities(repo, proto)
- rsp = b'capabilities: %s' % servercaps.data
- fout.write(b'%d\n%s\n' % (len(rsp), rsp))
- fout.flush()
-
- proto = sshv2protocolhandler(ui, fin, fout)
- protoswitched = True
-
- state = b'protov2-serving'
- continue
-
elif state == b'shutdown':
break
--- a/mercurial/wireprototypes.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/wireprototypes.py Fri Feb 18 14:27:43 2022 +0100
@@ -21,10 +21,6 @@
# Names of the SSH protocol implementations.
SSHV1 = b'ssh-v1'
-# These are advertised over the wire. Increment the counters at the end
-# to reflect BC breakages.
-SSHV2 = b'exp-ssh-v2-0003'
-HTTP_WIREPROTO_V2 = b'exp-http-v2-0003'
NARROWCAP = b'exp-narrow-1'
ELLIPSESCAP1 = b'exp-ellipses-1'
@@ -37,19 +33,10 @@
b'transport': b'ssh',
b'version': 1,
},
- SSHV2: {
- b'transport': b'ssh',
- # TODO mark as version 2 once all commands are implemented.
- b'version': 1,
- },
b'http-v1': {
b'transport': b'http',
b'version': 1,
},
- HTTP_WIREPROTO_V2: {
- b'transport': b'http',
- b'version': 2,
- },
}
--- a/mercurial/wireprotov1server.py Fri Feb 18 12:55:39 2022 +0100
+++ b/mercurial/wireprotov1server.py Fri Feb 18 14:27:43 2022 +0100
@@ -147,12 +147,6 @@
k for k, v in wireprototypes.TRANSPORTS.items() if v[b'version'] == 1
}
- # Because SSHv2 is a mirror of SSHv1, we allow "batch" commands through to
- # SSHv2.
- # TODO undo this hack when SSH is using the unified frame protocol.
- if name == b'batch':
- transports.add(wireprototypes.SSHV2)
-
if permission not in (b'push', b'pull'):
raise error.ProgrammingError(
b'invalid wire protocol permission; '
@@ -306,7 +300,7 @@
if streamclone.allowservergeneration(repo):
if repo.ui.configbool(b'server', b'preferuncompressed'):
caps.append(b'stream-preferred')
- requiredformats = repo.requirements & repo.supportedformats
+ requiredformats = streamclone.streamed_requirements(repo)
# if our local revlogs are just revlogv1, add 'stream' cap
if not requiredformats - {requirementsmod.REVLOGV1_REQUIREMENT}:
caps.append(b'stream')
--- a/mercurial/wireprotov2peer.py Fri Feb 18 12:55:39 2022 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,576 +0,0 @@
-# wireprotov2peer.py - client side code for wire protocol version 2
-#
-# Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-from __future__ import absolute_import
-
-import threading
-
-from .i18n import _
-from . import (
- encoding,
- error,
- pycompat,
- sslutil,
- url as urlmod,
- util,
- wireprotoframing,
- wireprototypes,
-)
-from .utils import cborutil
-
-
-def formatrichmessage(atoms):
- """Format an encoded message from the framing protocol."""
-
- chunks = []
-
- for atom in atoms:
- msg = _(atom[b'msg'])
-
- if b'args' in atom:
- msg = msg % tuple(atom[b'args'])
-
- chunks.append(msg)
-
- return b''.join(chunks)
-
-
-SUPPORTED_REDIRECT_PROTOCOLS = {
- b'http',
- b'https',
-}
-
-SUPPORTED_CONTENT_HASHES = {
- b'sha1',
- b'sha256',
-}
-
-
-def redirecttargetsupported(ui, target):
- """Determine whether a redirect target entry is supported.
-
- ``target`` should come from the capabilities data structure emitted by
- the server.
- """
- if target.get(b'protocol') not in SUPPORTED_REDIRECT_PROTOCOLS:
- ui.note(
- _(b'(remote redirect target %s uses unsupported protocol: %s)\n')
- % (target[b'name'], target.get(b'protocol', b''))
- )
- return False
-
- if target.get(b'snirequired') and not sslutil.hassni:
- ui.note(
- _(b'(redirect target %s requires SNI, which is unsupported)\n')
- % target[b'name']
- )
- return False
-
- if b'tlsversions' in target:
- tlsversions = set(target[b'tlsversions'])
- supported = set()
-
- for v in sslutil.supportedprotocols:
- assert v.startswith(b'tls')
- supported.add(v[3:])
-
- if not tlsversions & supported:
- ui.note(
- _(
- b'(remote redirect target %s requires unsupported TLS '
- b'versions: %s)\n'
- )
- % (target[b'name'], b', '.join(sorted(tlsversions)))
- )
- return False
-
- ui.note(_(b'(remote redirect target %s is compatible)\n') % target[b'name'])
-
- return True
-
-
-def supportedredirects(ui, apidescriptor):
- """Resolve the "redirect" command request key given an API descriptor.
-
- Given an API descriptor returned by the server, returns a data structure
- that can be used in hte "redirect" field of command requests to advertise
- support for compatible redirect targets.
-
- Returns None if no redirect targets are remotely advertised or if none are
- supported.
- """
- if not apidescriptor or b'redirect' not in apidescriptor:
- return None
-
- targets = [
- t[b'name']
- for t in apidescriptor[b'redirect'][b'targets']
- if redirecttargetsupported(ui, t)
- ]
-
- hashes = [
- h
- for h in apidescriptor[b'redirect'][b'hashes']
- if h in SUPPORTED_CONTENT_HASHES
- ]
-
- return {
- b'targets': targets,
- b'hashes': hashes,
- }
-
-
-class commandresponse(object):
- """Represents the response to a command request.
-
- Instances track the state of the command and hold its results.
-
- An external entity is required to update the state of the object when
- events occur.
- """
-
- def __init__(self, requestid, command, fromredirect=False):
- self.requestid = requestid
- self.command = command
- self.fromredirect = fromredirect
-
- # Whether all remote input related to this command has been
- # received.
- self._inputcomplete = False
-
- # We have a lock that is acquired when important object state is
- # mutated. This is to prevent race conditions between 1 thread
- # sending us new data and another consuming it.
- self._lock = threading.RLock()
-
- # An event is set when state of the object changes. This event
- # is waited on by the generator emitting objects.
- self._serviceable = threading.Event()
-
- self._pendingevents = []
- self._pendingerror = None
- self._decoder = cborutil.bufferingdecoder()
- self._seeninitial = False
- self._redirect = None
-
- def _oninputcomplete(self):
- with self._lock:
- self._inputcomplete = True
- self._serviceable.set()
-
- def _onresponsedata(self, data):
- available, readcount, wanted = self._decoder.decode(data)
-
- if not available:
- return
-
- with self._lock:
- for o in self._decoder.getavailable():
- if not self._seeninitial and not self.fromredirect:
- self._handleinitial(o)
- continue
-
- # We should never see an object after a content redirect,
- # as the spec says the main status object containing the
- # content redirect is the only object in the stream. Fail
- # if we see a misbehaving server.
- if self._redirect:
- raise error.Abort(
- _(
- b'received unexpected response data '
- b'after content redirect; the remote is '
- b'buggy'
- )
- )
-
- self._pendingevents.append(o)
-
- self._serviceable.set()
-
- def _onerror(self, e):
- self._pendingerror = e
-
- with self._lock:
- self._serviceable.set()
-
- def _handleinitial(self, o):
- self._seeninitial = True
- if o[b'status'] == b'ok':
- return
-
- elif o[b'status'] == b'redirect':
- l = o[b'location']
- self._redirect = wireprototypes.alternatelocationresponse(
- url=l[b'url'],
- mediatype=l[b'mediatype'],
- size=l.get(b'size'),
- fullhashes=l.get(b'fullhashes'),
- fullhashseed=l.get(b'fullhashseed'),
- serverdercerts=l.get(b'serverdercerts'),
- servercadercerts=l.get(b'servercadercerts'),
- )
- return
-
- atoms = [{b'msg': o[b'error'][b'message']}]
- if b'args' in o[b'error']:
- atoms[0][b'args'] = o[b'error'][b'args']
-
- raise error.RepoError(formatrichmessage(atoms))
-
- def objects(self):
- """Obtained decoded objects from this response.
-
- This is a generator of data structures that were decoded from the
- command response.
-
- Obtaining the next member of the generator may block due to waiting
- on external data to become available.
-
- If the server encountered an error in the middle of serving the data
- or if another error occurred, an exception may be raised when
- advancing the generator.
- """
- while True:
- # TODO this can infinite loop if self._inputcomplete is never
- # set. We likely want to tie the lifetime of this object/state
- # to that of the background thread receiving frames and updating
- # our state.
- self._serviceable.wait(1.0)
-
- if self._pendingerror:
- raise self._pendingerror
-
- with self._lock:
- self._serviceable.clear()
-
- # Make copies because objects could be mutated during
- # iteration.
- stop = self._inputcomplete
- pending = list(self._pendingevents)
- self._pendingevents[:] = []
-
- for o in pending:
- yield o
-
- if stop:
- break
-
-
-class clienthandler(object):
- """Object to handle higher-level client activities.
-
- The ``clientreactor`` is used to hold low-level state about the frame-based
- protocol, such as which requests and streams are active. This type is used
- for higher-level operations, such as reading frames from a socket, exposing
- and managing a higher-level primitive for representing command responses,
- etc. This class is what peers should probably use to bridge wire activity
- with the higher-level peer API.
- """
-
- def __init__(
- self, ui, clientreactor, opener=None, requestbuilder=util.urlreq.request
- ):
- self._ui = ui
- self._reactor = clientreactor
- self._requests = {}
- self._futures = {}
- self._responses = {}
- self._redirects = []
- self._frameseof = False
- self._opener = opener or urlmod.opener(ui)
- self._requestbuilder = requestbuilder
-
- def callcommand(self, command, args, f, redirect=None):
- """Register a request to call a command.
-
- Returns an iterable of frames that should be sent over the wire.
- """
- request, action, meta = self._reactor.callcommand(
- command, args, redirect=redirect
- )
-
- if action != b'noop':
- raise error.ProgrammingError(b'%s not yet supported' % action)
-
- rid = request.requestid
- self._requests[rid] = request
- self._futures[rid] = f
- # TODO we need some kind of lifetime on response instances otherwise
- # objects() may deadlock.
- self._responses[rid] = commandresponse(rid, command)
-
- return iter(())
-
- def flushcommands(self):
- """Flush all queued commands.
-
- Returns an iterable of frames that should be sent over the wire.
- """
- action, meta = self._reactor.flushcommands()
-
- if action != b'sendframes':
- raise error.ProgrammingError(b'%s not yet supported' % action)
-
- return meta[b'framegen']
-
- def readdata(self, framefh):
- """Attempt to read data and do work.
-
- Returns None if no data was read. Presumably this means we're
- done with all read I/O.
- """
- if not self._frameseof:
- frame = wireprotoframing.readframe(framefh)
- if frame is None:
- # TODO tell reactor?
- self._frameseof = True
- else:
- self._ui.debug(b'received %r\n' % frame)
- self._processframe(frame)
-
- # Also try to read the first redirect.
- if self._redirects:
- if not self._processredirect(*self._redirects[0]):
- self._redirects.pop(0)
-
- if self._frameseof and not self._redirects:
- return None
-
- return True
-
- def _processframe(self, frame):
- """Process a single read frame."""
-
- action, meta = self._reactor.onframerecv(frame)
-
- if action == b'error':
- e = error.RepoError(meta[b'message'])
-
- if frame.requestid in self._responses:
- self._responses[frame.requestid]._oninputcomplete()
-
- if frame.requestid in self._futures:
- self._futures[frame.requestid].set_exception(e)
- del self._futures[frame.requestid]
- else:
- raise e
-
- return
- elif action == b'noop':
- return
- elif action == b'responsedata':
- # Handled below.
- pass
- else:
- raise error.ProgrammingError(b'action not handled: %s' % action)
-
- if frame.requestid not in self._requests:
- raise error.ProgrammingError(
- b'received frame for unknown request; this is either a bug in '
- b'the clientreactor not screening for this or this instance was '
- b'never told about this request: %r' % frame
- )
-
- response = self._responses[frame.requestid]
-
- if action == b'responsedata':
- # Any failures processing this frame should bubble up to the
- # future tracking the request.
- try:
- self._processresponsedata(frame, meta, response)
- except BaseException as e:
- # If an exception occurs before the future is resolved,
- # fail the future. Otherwise, we stuff the exception on
- # the response object so it can be raised during objects()
- # iteration. If nothing is consuming objects(), we could
- # silently swallow this exception. That's a risk we'll have to
- # take.
- if frame.requestid in self._futures:
- self._futures[frame.requestid].set_exception(e)
- del self._futures[frame.requestid]
- response._oninputcomplete()
- else:
- response._onerror(e)
- else:
- raise error.ProgrammingError(
- b'unhandled action from clientreactor: %s' % action
- )
-
- def _processresponsedata(self, frame, meta, response):
- # This can raise. The caller can handle it.
- response._onresponsedata(meta[b'data'])
-
- # We need to be careful about resolving futures prematurely. If a
- # response is a redirect response, resolving the future before the
- # redirect is processed would result in the consumer seeing an
- # empty stream of objects, since they'd be consuming our
- # response.objects() instead of the redirect's response.objects().
- #
- # Our strategy is to not resolve/finish the request until either
- # EOS occurs or until the initial response object is fully received.
-
- # Always react to eos.
- if meta[b'eos']:
- response._oninputcomplete()
- del self._requests[frame.requestid]
-
- # Not EOS but we haven't decoded the initial response object yet.
- # Return and wait for more data.
- elif not response._seeninitial:
- return
-
- # The specification says no objects should follow the initial/redirect
- # object. So it should be safe to handle the redirect object if one is
- # decoded, without having to wait for EOS.
- if response._redirect:
- self._followredirect(frame.requestid, response._redirect)
- return
-
- # If the command has a decoder, we wait until all input has been
- # received before resolving the future. Otherwise we resolve the
- # future immediately.
- if frame.requestid not in self._futures:
- return
-
- if response.command not in COMMAND_DECODERS:
- self._futures[frame.requestid].set_result(response.objects())
- del self._futures[frame.requestid]
- elif response._inputcomplete:
- decoded = COMMAND_DECODERS[response.command](response.objects())
- self._futures[frame.requestid].set_result(decoded)
- del self._futures[frame.requestid]
-
- def _followredirect(self, requestid, redirect):
- """Called to initiate redirect following for a request."""
- self._ui.note(_(b'(following redirect to %s)\n') % redirect.url)
-
- # TODO handle framed responses.
- if redirect.mediatype != b'application/mercurial-cbor':
- raise error.Abort(
- _(b'cannot handle redirects for the %s media type')
- % redirect.mediatype
- )
-
- if redirect.fullhashes:
- self._ui.warn(
- _(
- b'(support for validating hashes on content '
- b'redirects not supported)\n'
- )
- )
-
- if redirect.serverdercerts or redirect.servercadercerts:
- self._ui.warn(
- _(
- b'(support for pinning server certificates on '
- b'content redirects not supported)\n'
- )
- )
-
- headers = {
- 'Accept': redirect.mediatype,
- }
-
- req = self._requestbuilder(pycompat.strurl(redirect.url), None, headers)
-
- try:
- res = self._opener.open(req)
- except util.urlerr.httperror as e:
- if e.code == 401:
- raise error.Abort(_(b'authorization failed'))
- raise
- except util.httplib.HTTPException as e:
- self._ui.debug(b'http error requesting %s\n' % req.get_full_url())
- self._ui.traceback()
- raise IOError(None, e)
-
- urlmod.wrapresponse(res)
-
- # The existing response object is associated with frame data. Rather
- # than try to normalize its state, just create a new object.
- oldresponse = self._responses[requestid]
- self._responses[requestid] = commandresponse(
- requestid, oldresponse.command, fromredirect=True
- )
-
- self._redirects.append((requestid, res))
-
- def _processredirect(self, rid, res):
- """Called to continue processing a response from a redirect.
-
- Returns a bool indicating if the redirect is still serviceable.
- """
- response = self._responses[rid]
-
- try:
- data = res.read(32768)
- response._onresponsedata(data)
-
- # We're at end of stream.
- if not data:
- response._oninputcomplete()
-
- if rid not in self._futures:
- return bool(data)
-
- if response.command not in COMMAND_DECODERS:
- self._futures[rid].set_result(response.objects())
- del self._futures[rid]
- elif response._inputcomplete:
- decoded = COMMAND_DECODERS[response.command](response.objects())
- self._futures[rid].set_result(decoded)
- del self._futures[rid]
-
- return bool(data)
-
- except BaseException as e:
- self._futures[rid].set_exception(e)
- del self._futures[rid]
- response._oninputcomplete()
- return False
-
-
-def decodebranchmap(objs):
- # Response should be a single CBOR map of branch name to array of nodes.
- bm = next(objs)
-
- return {encoding.tolocal(k): v for k, v in bm.items()}
-
-
-def decodeheads(objs):
- # Array of node bytestrings.
- return next(objs)
-
-
-def decodeknown(objs):
- # Bytestring where each byte is a 0 or 1.
- raw = next(objs)
-
- return [True if raw[i : i + 1] == b'1' else False for i in range(len(raw))]
-
-
-def decodelistkeys(objs):
- # Map with bytestring keys and values.
- return next(objs)
-
-
-def decodelookup(objs):
- return next(objs)
-
-
-def decodepushkey(objs):
- return next(objs)
-
-
-COMMAND_DECODERS = {
- b'branchmap': decodebranchmap,
- b'heads': decodeheads,
- b'known': decodeknown,
- b'listkeys': decodelistkeys,
- b'lookup': decodelookup,
- b'pushkey': decodepushkey,
-}
--- a/mercurial/wireprotov2server.py Fri Feb 18 12:55:39 2022 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,1613 +0,0 @@
-# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
-# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-from __future__ import absolute_import
-
-import collections
-import contextlib
-
-from .i18n import _
-from .node import hex
-from . import (
- discovery,
- encoding,
- error,
- match as matchmod,
- narrowspec,
- pycompat,
- streamclone,
- templatefilters,
- util,
- wireprotoframing,
- wireprototypes,
-)
-from .interfaces import util as interfaceutil
-from .utils import (
- cborutil,
- hashutil,
- stringutil,
-)
-
-FRAMINGTYPE = b'application/mercurial-exp-framing-0006'
-
-HTTP_WIREPROTO_V2 = wireprototypes.HTTP_WIREPROTO_V2
-
-COMMANDS = wireprototypes.commanddict()
-
-# Value inserted into cache key computation function. Change the value to
-# force new cache keys for every command request. This should be done when
-# there is a change to how caching works, etc.
-GLOBAL_CACHE_VERSION = 1
-
-
-def handlehttpv2request(rctx, req, res, checkperm, urlparts):
- from .hgweb import common as hgwebcommon
-
- # URL space looks like: <permissions>/<command>, where <permission> can
- # be ``ro`` or ``rw`` to signal read-only or read-write, respectively.
-
- # Root URL does nothing meaningful... yet.
- if not urlparts:
- res.status = b'200 OK'
- res.headers[b'Content-Type'] = b'text/plain'
- res.setbodybytes(_(b'HTTP version 2 API handler'))
- return
-
- if len(urlparts) == 1:
- res.status = b'404 Not Found'
- res.headers[b'Content-Type'] = b'text/plain'
- res.setbodybytes(
- _(b'do not know how to process %s\n') % req.dispatchpath
- )
- return
-
- permission, command = urlparts[0:2]
-
- if permission not in (b'ro', b'rw'):
- res.status = b'404 Not Found'
- res.headers[b'Content-Type'] = b'text/plain'
- res.setbodybytes(_(b'unknown permission: %s') % permission)
- return
-
- if req.method != b'POST':
- res.status = b'405 Method Not Allowed'
- res.headers[b'Allow'] = b'POST'
- res.setbodybytes(_(b'commands require POST requests'))
- return
-
- # At some point we'll want to use our own API instead of recycling the
- # behavior of version 1 of the wire protocol...
- # TODO return reasonable responses - not responses that overload the
- # HTTP status line message for error reporting.
- try:
- checkperm(rctx, req, b'pull' if permission == b'ro' else b'push')
- except hgwebcommon.ErrorResponse as e:
- res.status = hgwebcommon.statusmessage(
- e.code, stringutil.forcebytestr(e)
- )
- for k, v in e.headers:
- res.headers[k] = v
- res.setbodybytes(b'permission denied')
- return
-
- # We have a special endpoint to reflect the request back at the client.
- if command == b'debugreflect':
- _processhttpv2reflectrequest(rctx.repo.ui, rctx.repo, req, res)
- return
-
- # Extra commands that we handle that aren't really wire protocol
- # commands. Think extra hard before making this hackery available to
- # extension.
- extracommands = {b'multirequest'}
-
- if command not in COMMANDS and command not in extracommands:
- res.status = b'404 Not Found'
- res.headers[b'Content-Type'] = b'text/plain'
- res.setbodybytes(_(b'unknown wire protocol command: %s\n') % command)
- return
-
- repo = rctx.repo
- ui = repo.ui
-
- proto = httpv2protocolhandler(req, ui)
-
- if (
- not COMMANDS.commandavailable(command, proto)
- and command not in extracommands
- ):
- res.status = b'404 Not Found'
- res.headers[b'Content-Type'] = b'text/plain'
- res.setbodybytes(_(b'invalid wire protocol command: %s') % command)
- return
-
- # TODO consider cases where proxies may add additional Accept headers.
- if req.headers.get(b'Accept') != FRAMINGTYPE:
- res.status = b'406 Not Acceptable'
- res.headers[b'Content-Type'] = b'text/plain'
- res.setbodybytes(
- _(b'client MUST specify Accept header with value: %s\n')
- % FRAMINGTYPE
- )
- return
-
- if req.headers.get(b'Content-Type') != FRAMINGTYPE:
- res.status = b'415 Unsupported Media Type'
- # TODO we should send a response with appropriate media type,
- # since client does Accept it.
- res.headers[b'Content-Type'] = b'text/plain'
- res.setbodybytes(
- _(b'client MUST send Content-Type header with value: %s\n')
- % FRAMINGTYPE
- )
- return
-
- _processhttpv2request(ui, repo, req, res, permission, command, proto)
-
-
-def _processhttpv2reflectrequest(ui, repo, req, res):
- """Reads unified frame protocol request and dumps out state to client.
-
- This special endpoint can be used to help debug the wire protocol.
-
- Instead of routing the request through the normal dispatch mechanism,
- we instead read all frames, decode them, and feed them into our state
- tracker. We then dump the log of all that activity back out to the
- client.
- """
- # Reflection APIs have a history of being abused, accidentally disclosing
- # sensitive data, etc. So we have a config knob.
- if not ui.configbool(b'experimental', b'web.api.debugreflect'):
- res.status = b'404 Not Found'
- res.headers[b'Content-Type'] = b'text/plain'
- res.setbodybytes(_(b'debugreflect service not available'))
- return
-
- # We assume we have a unified framing protocol request body.
-
- reactor = wireprotoframing.serverreactor(ui)
- states = []
-
- while True:
- frame = wireprotoframing.readframe(req.bodyfh)
-
- if not frame:
- states.append(b'received: <no frame>')
- break
-
- states.append(
- b'received: %d %d %d %s'
- % (frame.typeid, frame.flags, frame.requestid, frame.payload)
- )
-
- action, meta = reactor.onframerecv(frame)
- states.append(templatefilters.json((action, meta)))
-
- action, meta = reactor.oninputeof()
- meta[b'action'] = action
- states.append(templatefilters.json(meta))
-
- res.status = b'200 OK'
- res.headers[b'Content-Type'] = b'text/plain'
- res.setbodybytes(b'\n'.join(states))
-
-
-def _processhttpv2request(ui, repo, req, res, authedperm, reqcommand, proto):
- """Post-validation handler for HTTPv2 requests.
-
- Called when the HTTP request contains unified frame-based protocol
- frames for evaluation.
- """
- # TODO Some HTTP clients are full duplex and can receive data before
- # the entire request is transmitted. Figure out a way to indicate support
- # for that so we can opt into full duplex mode.
- reactor = wireprotoframing.serverreactor(ui, deferoutput=True)
- seencommand = False
-
- outstream = None
-
- while True:
- frame = wireprotoframing.readframe(req.bodyfh)
- if not frame:
- break
-
- action, meta = reactor.onframerecv(frame)
-
- if action == b'wantframe':
- # Need more data before we can do anything.
- continue
- elif action == b'runcommand':
- # Defer creating output stream because we need to wait for
- # protocol settings frames so proper encoding can be applied.
- if not outstream:
- outstream = reactor.makeoutputstream()
-
- sentoutput = _httpv2runcommand(
- ui,
- repo,
- req,
- res,
- authedperm,
- reqcommand,
- reactor,
- outstream,
- meta,
- issubsequent=seencommand,
- )
-
- if sentoutput:
- return
-
- seencommand = True
-
- elif action == b'error':
- # TODO define proper error mechanism.
- res.status = b'200 OK'
- res.headers[b'Content-Type'] = b'text/plain'
- res.setbodybytes(meta[b'message'] + b'\n')
- return
- else:
- raise error.ProgrammingError(
- b'unhandled action from frame processor: %s' % action
- )
-
- action, meta = reactor.oninputeof()
- if action == b'sendframes':
- # We assume we haven't started sending the response yet. If we're
- # wrong, the response type will raise an exception.
- res.status = b'200 OK'
- res.headers[b'Content-Type'] = FRAMINGTYPE
- res.setbodygen(meta[b'framegen'])
- elif action == b'noop':
- pass
- else:
- raise error.ProgrammingError(
- b'unhandled action from frame processor: %s' % action
- )
-
-
-def _httpv2runcommand(
- ui,
- repo,
- req,
- res,
- authedperm,
- reqcommand,
- reactor,
- outstream,
- command,
- issubsequent,
-):
- """Dispatch a wire protocol command made from HTTPv2 requests.
-
- The authenticated permission (``authedperm``) along with the original
- command from the URL (``reqcommand``) are passed in.
- """
- # We already validated that the session has permissions to perform the
- # actions in ``authedperm``. In the unified frame protocol, the canonical
- # command to run is expressed in a frame. However, the URL also requested
- # to run a specific command. We need to be careful that the command we
- # run doesn't have permissions requirements greater than what was granted
- # by ``authedperm``.
- #
- # Our rule for this is we only allow one command per HTTP request and
- # that command must match the command in the URL. However, we make
- # an exception for the ``multirequest`` URL. This URL is allowed to
- # execute multiple commands. We double check permissions of each command
- # as it is invoked to ensure there is no privilege escalation.
- # TODO consider allowing multiple commands to regular command URLs
- # iff each command is the same.
-
- proto = httpv2protocolhandler(req, ui, args=command[b'args'])
-
- if reqcommand == b'multirequest':
- if not COMMANDS.commandavailable(command[b'command'], proto):
- # TODO proper error mechanism
- res.status = b'200 OK'
- res.headers[b'Content-Type'] = b'text/plain'
- res.setbodybytes(
- _(b'wire protocol command not available: %s')
- % command[b'command']
- )
- return True
-
- # TODO don't use assert here, since it may be elided by -O.
- assert authedperm in (b'ro', b'rw')
- wirecommand = COMMANDS[command[b'command']]
- assert wirecommand.permission in (b'push', b'pull')
-
- if authedperm == b'ro' and wirecommand.permission != b'pull':
- # TODO proper error mechanism
- res.status = b'403 Forbidden'
- res.headers[b'Content-Type'] = b'text/plain'
- res.setbodybytes(
- _(b'insufficient permissions to execute command: %s')
- % command[b'command']
- )
- return True
-
- # TODO should we also call checkperm() here? Maybe not if we're going
- # to overhaul that API. The granted scope from the URL check should
- # be good enough.
-
- else:
- # Don't allow multiple commands outside of ``multirequest`` URL.
- if issubsequent:
- # TODO proper error mechanism
- res.status = b'200 OK'
- res.headers[b'Content-Type'] = b'text/plain'
- res.setbodybytes(
- _(b'multiple commands cannot be issued to this URL')
- )
- return True
-
- if reqcommand != command[b'command']:
- # TODO define proper error mechanism
- res.status = b'200 OK'
- res.headers[b'Content-Type'] = b'text/plain'
- res.setbodybytes(_(b'command in frame must match command in URL'))
- return True
-
- res.status = b'200 OK'
- res.headers[b'Content-Type'] = FRAMINGTYPE
-
- try:
- objs = dispatch(repo, proto, command[b'command'], command[b'redirect'])
-
- action, meta = reactor.oncommandresponsereadyobjects(
- outstream, command[b'requestid'], objs
- )
-
- except error.WireprotoCommandError as e:
- action, meta = reactor.oncommanderror(
- outstream, command[b'requestid'], e.message, e.messageargs
- )
-
- except Exception as e:
- action, meta = reactor.onservererror(
- outstream,
- command[b'requestid'],
- _(b'exception when invoking command: %s')
- % stringutil.forcebytestr(e),
- )
-
- if action == b'sendframes':
- res.setbodygen(meta[b'framegen'])
- return True
- elif action == b'noop':
- return False
- else:
- raise error.ProgrammingError(
- b'unhandled event from reactor: %s' % action
- )
-
-
-def getdispatchrepo(repo, proto, command):
- viewconfig = repo.ui.config(b'server', b'view')
- return repo.filtered(viewconfig)
-
-
-def dispatch(repo, proto, command, redirect):
- """Run a wire protocol command.
-
- Returns an iterable of objects that will be sent to the client.
- """
- repo = getdispatchrepo(repo, proto, command)
-
- entry = COMMANDS[command]
- func = entry.func
- spec = entry.args
-
- args = proto.getargs(spec)
-
- # There is some duplicate boilerplate code here for calling the command and
- # emitting objects. It is either that or a lot of indented code that looks
- # like a pyramid (since there are a lot of code paths that result in not
- # using the cacher).
- callcommand = lambda: func(repo, proto, **pycompat.strkwargs(args))
-
- # Request is not cacheable. Don't bother instantiating a cacher.
- if not entry.cachekeyfn:
- for o in callcommand():
- yield o
- return
-
- if redirect:
- redirecttargets = redirect[b'targets']
- redirecthashes = redirect[b'hashes']
- else:
- redirecttargets = []
- redirecthashes = []
-
- cacher = makeresponsecacher(
- repo,
- proto,
- command,
- args,
- cborutil.streamencode,
- redirecttargets=redirecttargets,
- redirecthashes=redirecthashes,
- )
-
- # But we have no cacher. Do default handling.
- if not cacher:
- for o in callcommand():
- yield o
- return
-
- with cacher:
- cachekey = entry.cachekeyfn(
- repo, proto, cacher, **pycompat.strkwargs(args)
- )
-
- # No cache key or the cacher doesn't like it. Do default handling.
- if cachekey is None or not cacher.setcachekey(cachekey):
- for o in callcommand():
- yield o
- return
-
- # Serve it from the cache, if possible.
- cached = cacher.lookup()
-
- if cached:
- for o in cached[b'objs']:
- yield o
- return
-
- # Else call the command and feed its output into the cacher, allowing
- # the cacher to buffer/mutate objects as it desires.
- for o in callcommand():
- for o in cacher.onobject(o):
- yield o
-
- for o in cacher.onfinished():
- yield o
-
-
-@interfaceutil.implementer(wireprototypes.baseprotocolhandler)
-class httpv2protocolhandler(object):
- def __init__(self, req, ui, args=None):
- self._req = req
- self._ui = ui
- self._args = args
-
- @property
- def name(self):
- return HTTP_WIREPROTO_V2
-
- def getargs(self, args):
- # First look for args that were passed but aren't registered on this
- # command.
- extra = set(self._args) - set(args)
- if extra:
- raise error.WireprotoCommandError(
- b'unsupported argument to command: %s'
- % b', '.join(sorted(extra))
- )
-
- # And look for required arguments that are missing.
- missing = {a for a in args if args[a][b'required']} - set(self._args)
-
- if missing:
- raise error.WireprotoCommandError(
- b'missing required arguments: %s' % b', '.join(sorted(missing))
- )
-
- # Now derive the arguments to pass to the command, taking into
- # account the arguments specified by the client.
- data = {}
- for k, meta in sorted(args.items()):
- # This argument wasn't passed by the client.
- if k not in self._args:
- data[k] = meta[b'default']()
- continue
-
- v = self._args[k]
-
- # Sets may be expressed as lists. Silently normalize.
- if meta[b'type'] == b'set' and isinstance(v, list):
- v = set(v)
-
- # TODO consider more/stronger type validation.
-
- data[k] = v
-
- return data
-
- def getprotocaps(self):
- # Protocol capabilities are currently not implemented for HTTP V2.
- return set()
-
- def getpayload(self):
- raise NotImplementedError
-
- @contextlib.contextmanager
- def mayberedirectstdio(self):
- raise NotImplementedError
-
- def client(self):
- raise NotImplementedError
-
- def addcapabilities(self, repo, caps):
- return caps
-
- def checkperm(self, perm):
- raise NotImplementedError
-
-
-def httpv2apidescriptor(req, repo):
- proto = httpv2protocolhandler(req, repo.ui)
-
- return _capabilitiesv2(repo, proto)
-
-
-def _capabilitiesv2(repo, proto):
- """Obtain the set of capabilities for version 2 transports.
-
- These capabilities are distinct from the capabilities for version 1
- transports.
- """
- caps = {
- b'commands': {},
- b'framingmediatypes': [FRAMINGTYPE],
- b'pathfilterprefixes': set(narrowspec.VALID_PREFIXES),
- }
-
- for command, entry in COMMANDS.items():
- args = {}
-
- for arg, meta in entry.args.items():
- args[arg] = {
- # TODO should this be a normalized type using CBOR's
- # terminology?
- b'type': meta[b'type'],
- b'required': meta[b'required'],
- }
-
- if not meta[b'required']:
- args[arg][b'default'] = meta[b'default']()
-
- if meta[b'validvalues']:
- args[arg][b'validvalues'] = meta[b'validvalues']
-
- # TODO this type of check should be defined in a per-command callback.
- if (
- command == b'rawstorefiledata'
- and not streamclone.allowservergeneration(repo)
- ):
- continue
-
- caps[b'commands'][command] = {
- b'args': args,
- b'permissions': [entry.permission],
- }
-
- if entry.extracapabilitiesfn:
- extracaps = entry.extracapabilitiesfn(repo, proto)
- caps[b'commands'][command].update(extracaps)
-
- caps[b'rawrepoformats'] = sorted(repo.requirements & repo.supportedformats)
-
- targets = getadvertisedredirecttargets(repo, proto)
- if targets:
- caps[b'redirect'] = {
- b'targets': [],
- b'hashes': [b'sha256', b'sha1'],
- }
-
- for target in targets:
- entry = {
- b'name': target[b'name'],
- b'protocol': target[b'protocol'],
- b'uris': target[b'uris'],
- }
-
- for key in (b'snirequired', b'tlsversions'):
- if key in target:
- entry[key] = target[key]
-
- caps[b'redirect'][b'targets'].append(entry)
-
- return proto.addcapabilities(repo, caps)
-
-
-def getadvertisedredirecttargets(repo, proto):
- """Obtain a list of content redirect targets.
-
- Returns a list containing potential redirect targets that will be
- advertised in capabilities data. Each dict MUST have the following
- keys:
-
- name
- The name of this redirect target. This is the identifier clients use
- to refer to a target. It is transferred as part of every command
- request.
-
- protocol
- Network protocol used by this target. Typically this is the string
- in front of the ``://`` in a URL. e.g. ``https``.
-
- uris
- List of representative URIs for this target. Clients can use the
- URIs to test parsing for compatibility or for ordering preference
- for which target to use.
-
- The following optional keys are recognized:
-
- snirequired
- Bool indicating if Server Name Indication (SNI) is required to
- connect to this target.
-
- tlsversions
- List of bytes indicating which TLS versions are supported by this
- target.
-
- By default, clients reflect the target order advertised by servers
- and servers will use the first client-advertised target when picking
- a redirect target. So targets should be advertised in the order the
- server prefers they be used.
- """
- return []
-
-
-def wireprotocommand(
- name,
- args=None,
- permission=b'push',
- cachekeyfn=None,
- extracapabilitiesfn=None,
-):
- """Decorator to declare a wire protocol command.
-
- ``name`` is the name of the wire protocol command being provided.
-
- ``args`` is a dict defining arguments accepted by the command. Keys are
- the argument name. Values are dicts with the following keys:
-
- ``type``
- The argument data type. Must be one of the following string
- literals: ``bytes``, ``int``, ``list``, ``dict``, ``set``,
- or ``bool``.
-
- ``default``
- A callable returning the default value for this argument. If not
- specified, ``None`` will be the default value.
-
- ``example``
- An example value for this argument.
-
- ``validvalues``
- Set of recognized values for this argument.
-
- ``permission`` defines the permission type needed to run this command.
- Can be ``push`` or ``pull``. These roughly map to read-write and read-only,
- respectively. Default is to assume command requires ``push`` permissions
- because otherwise commands not declaring their permissions could modify
- a repository that is supposed to be read-only.
-
- ``cachekeyfn`` defines an optional callable that can derive the
- cache key for this request.
-
- ``extracapabilitiesfn`` defines an optional callable that defines extra
- command capabilities/parameters that are advertised next to the command
- in the capabilities data structure describing the server. The callable
- receives as arguments the repository and protocol objects. It returns
- a dict of extra fields to add to the command descriptor.
-
- Wire protocol commands are generators of objects to be serialized and
- sent to the client.
-
- If a command raises an uncaught exception, this will be translated into
- a command error.
-
- All commands can opt in to being cacheable by defining a function
- (``cachekeyfn``) that is called to derive a cache key. This function
- receives the same arguments as the command itself plus a ``cacher``
- argument containing the active cacher for the request and returns a bytes
- containing the key in a cache the response to this command may be cached
- under.
- """
- transports = {
- k for k, v in wireprototypes.TRANSPORTS.items() if v[b'version'] == 2
- }
-
- if permission not in (b'push', b'pull'):
- raise error.ProgrammingError(
- b'invalid wire protocol permission; '
- b'got %s; expected "push" or "pull"' % permission
- )
-
- if args is None:
- args = {}
-
- if not isinstance(args, dict):
- raise error.ProgrammingError(
- b'arguments for version 2 commands must be declared as dicts'
- )
-
- for arg, meta in args.items():
- if arg == b'*':
- raise error.ProgrammingError(
- b'* argument name not allowed on version 2 commands'
- )
-
- if not isinstance(meta, dict):
- raise error.ProgrammingError(
- b'arguments for version 2 commands '
- b'must declare metadata as a dict'
- )
-
- if b'type' not in meta:
- raise error.ProgrammingError(
- b'%s argument for command %s does not '
- b'declare type field' % (arg, name)
- )
-
- if meta[b'type'] not in (
- b'bytes',
- b'int',
- b'list',
- b'dict',
- b'set',
- b'bool',
- ):
- raise error.ProgrammingError(
- b'%s argument for command %s has '
- b'illegal type: %s' % (arg, name, meta[b'type'])
- )
-
- if b'example' not in meta:
- raise error.ProgrammingError(
- b'%s argument for command %s does not '
- b'declare example field' % (arg, name)
- )
-
- meta[b'required'] = b'default' not in meta
-
- meta.setdefault(b'default', lambda: None)
- meta.setdefault(b'validvalues', None)
-
- def register(func):
- if name in COMMANDS:
- raise error.ProgrammingError(
- b'%s command already registered for version 2' % name
- )
-
- COMMANDS[name] = wireprototypes.commandentry(
- func,
- args=args,
- transports=transports,
- permission=permission,
- cachekeyfn=cachekeyfn,
- extracapabilitiesfn=extracapabilitiesfn,
- )
-
- return func
-
- return register
-
-
-def makecommandcachekeyfn(command, localversion=None, allargs=False):
- """Construct a cache key derivation function with common features.
-
- By default, the cache key is a hash of:
-
- * The command name.
- * A global cache version number.
- * A local cache version number (passed via ``localversion``).
- * All the arguments passed to the command.
- * The media type used.
- * Wire protocol version string.
- * The repository path.
- """
- if not allargs:
- raise error.ProgrammingError(
- b'only allargs=True is currently supported'
- )
-
- if localversion is None:
- raise error.ProgrammingError(b'must set localversion argument value')
-
- def cachekeyfn(repo, proto, cacher, **args):
- spec = COMMANDS[command]
-
- # Commands that mutate the repo can not be cached.
- if spec.permission == b'push':
- return None
-
- # TODO config option to disable caching.
-
- # Our key derivation strategy is to construct a data structure
- # holding everything that could influence cacheability and to hash
- # the CBOR representation of that. Using CBOR seems like it might
- # be overkill. However, simpler hashing mechanisms are prone to
- # duplicate input issues. e.g. if you just concatenate two values,
- # "foo"+"bar" is identical to "fo"+"obar". Using CBOR provides
- # "padding" between values and prevents these problems.
-
- # Seed the hash with various data.
- state = {
- # To invalidate all cache keys.
- b'globalversion': GLOBAL_CACHE_VERSION,
- # More granular cache key invalidation.
- b'localversion': localversion,
- # Cache keys are segmented by command.
- b'command': command,
- # Throw in the media type and API version strings so changes
- # to exchange semantics invalid cache.
- b'mediatype': FRAMINGTYPE,
- b'version': HTTP_WIREPROTO_V2,
- # So same requests for different repos don't share cache keys.
- b'repo': repo.root,
- }
-
- # The arguments passed to us will have already been normalized.
- # Default values will be set, etc. This is important because it
- # means that it doesn't matter if clients send an explicit argument
- # or rely on the default value: it will all normalize to the same
- # set of arguments on the server and therefore the same cache key.
- #
- # Arguments by their very nature must support being encoded to CBOR.
- # And the CBOR encoder is deterministic. So we hash the arguments
- # by feeding the CBOR of their representation into the hasher.
- if allargs:
- state[b'args'] = pycompat.byteskwargs(args)
-
- cacher.adjustcachekeystate(state)
-
- hasher = hashutil.sha1()
- for chunk in cborutil.streamencode(state):
- hasher.update(chunk)
-
- return pycompat.sysbytes(hasher.hexdigest())
-
- return cachekeyfn
-
-
-def makeresponsecacher(
- repo, proto, command, args, objencoderfn, redirecttargets, redirecthashes
-):
- """Construct a cacher for a cacheable command.
-
- Returns an ``iwireprotocolcommandcacher`` instance.
-
- Extensions can monkeypatch this function to provide custom caching
- backends.
- """
- return None
-
-
-def resolvenodes(repo, revisions):
- """Resolve nodes from a revisions specifier data structure."""
- cl = repo.changelog
- clhasnode = cl.hasnode
-
- seen = set()
- nodes = []
-
- if not isinstance(revisions, list):
- raise error.WireprotoCommandError(
- b'revisions must be defined as an array'
- )
-
- for spec in revisions:
- if b'type' not in spec:
- raise error.WireprotoCommandError(
- b'type key not present in revision specifier'
- )
-
- typ = spec[b'type']
-
- if typ == b'changesetexplicit':
- if b'nodes' not in spec:
- raise error.WireprotoCommandError(
- b'nodes key not present in changesetexplicit revision '
- b'specifier'
- )
-
- for node in spec[b'nodes']:
- if node not in seen:
- nodes.append(node)
- seen.add(node)
-
- elif typ == b'changesetexplicitdepth':
- for key in (b'nodes', b'depth'):
- if key not in spec:
- raise error.WireprotoCommandError(
- b'%s key not present in changesetexplicitdepth revision '
- b'specifier',
- (key,),
- )
-
- for rev in repo.revs(
- b'ancestors(%ln, %s)', spec[b'nodes'], spec[b'depth'] - 1
- ):
- node = cl.node(rev)
-
- if node not in seen:
- nodes.append(node)
- seen.add(node)
-
- elif typ == b'changesetdagrange':
- for key in (b'roots', b'heads'):
- if key not in spec:
- raise error.WireprotoCommandError(
- b'%s key not present in changesetdagrange revision '
- b'specifier',
- (key,),
- )
-
- if not spec[b'heads']:
- raise error.WireprotoCommandError(
- b'heads key in changesetdagrange cannot be empty'
- )
-
- if spec[b'roots']:
- common = [n for n in spec[b'roots'] if clhasnode(n)]
- else:
- common = [repo.nullid]
-
- for n in discovery.outgoing(repo, common, spec[b'heads']).missing:
- if n not in seen:
- nodes.append(n)
- seen.add(n)
-
- else:
- raise error.WireprotoCommandError(
- b'unknown revision specifier type: %s', (typ,)
- )
-
- return nodes
-
-
-@wireprotocommand(b'branchmap', permission=b'pull')
-def branchmapv2(repo, proto):
- yield {
- encoding.fromlocal(k): v
- for k, v in pycompat.iteritems(repo.branchmap())
- }
-
-
-@wireprotocommand(b'capabilities', permission=b'pull')
-def capabilitiesv2(repo, proto):
- yield _capabilitiesv2(repo, proto)
-
-
-@wireprotocommand(
- b'changesetdata',
- args={
- b'revisions': {
- b'type': b'list',
- b'example': [
- {
- b'type': b'changesetexplicit',
- b'nodes': [b'abcdef...'],
- }
- ],
- },
- b'fields': {
- b'type': b'set',
- b'default': set,
- b'example': {b'parents', b'revision'},
- b'validvalues': {b'bookmarks', b'parents', b'phase', b'revision'},
- },
- },
- permission=b'pull',
-)
-def changesetdata(repo, proto, revisions, fields):
- # TODO look for unknown fields and abort when they can't be serviced.
- # This could probably be validated by dispatcher using validvalues.
-
- cl = repo.changelog
- outgoing = resolvenodes(repo, revisions)
- publishing = repo.publishing()
-
- if outgoing:
- repo.hook(b'preoutgoing', throw=True, source=b'serve')
-
- yield {
- b'totalitems': len(outgoing),
- }
-
- # The phases of nodes already transferred to the client may have changed
- # since the client last requested data. We send phase-only records
- # for these revisions, if requested.
- # TODO actually do this. We'll probably want to emit phase heads
- # in the ancestry set of the outgoing revisions. This will ensure
- # that phase updates within that set are seen.
- if b'phase' in fields:
- pass
-
- nodebookmarks = {}
- for mark, node in repo._bookmarks.items():
- nodebookmarks.setdefault(node, set()).add(mark)
-
- # It is already topologically sorted by revision number.
- for node in outgoing:
- d = {
- b'node': node,
- }
-
- if b'parents' in fields:
- d[b'parents'] = cl.parents(node)
-
- if b'phase' in fields:
- if publishing:
- d[b'phase'] = b'public'
- else:
- ctx = repo[node]
- d[b'phase'] = ctx.phasestr()
-
- if b'bookmarks' in fields and node in nodebookmarks:
- d[b'bookmarks'] = sorted(nodebookmarks[node])
- del nodebookmarks[node]
-
- followingmeta = []
- followingdata = []
-
- if b'revision' in fields:
- revisiondata = cl.revision(node)
- followingmeta.append((b'revision', len(revisiondata)))
- followingdata.append(revisiondata)
-
- # TODO make it possible for extensions to wrap a function or register
- # a handler to service custom fields.
-
- if followingmeta:
- d[b'fieldsfollowing'] = followingmeta
-
- yield d
-
- for extra in followingdata:
- yield extra
-
- # If requested, send bookmarks from nodes that didn't have revision
- # data sent so receiver is aware of any bookmark updates.
- if b'bookmarks' in fields:
- for node, marks in sorted(pycompat.iteritems(nodebookmarks)):
- yield {
- b'node': node,
- b'bookmarks': sorted(marks),
- }
-
-
-class FileAccessError(Exception):
- """Represents an error accessing a specific file."""
-
- def __init__(self, path, msg, args):
- self.path = path
- self.msg = msg
- self.args = args
-
-
-def getfilestore(repo, proto, path):
- """Obtain a file storage object for use with wire protocol.
-
- Exists as a standalone function so extensions can monkeypatch to add
- access control.
- """
- # This seems to work even if the file doesn't exist. So catch
- # "empty" files and return an error.
- fl = repo.file(path)
-
- if not len(fl):
- raise FileAccessError(path, b'unknown file: %s', (path,))
-
- return fl
-
-
-def emitfilerevisions(repo, path, revisions, linknodes, fields):
- for revision in revisions:
- d = {
- b'node': revision.node,
- }
-
- if b'parents' in fields:
- d[b'parents'] = [revision.p1node, revision.p2node]
-
- if b'linknode' in fields:
- d[b'linknode'] = linknodes[revision.node]
-
- followingmeta = []
- followingdata = []
-
- if b'revision' in fields:
- if revision.revision is not None:
- followingmeta.append((b'revision', len(revision.revision)))
- followingdata.append(revision.revision)
- else:
- d[b'deltabasenode'] = revision.basenode
- followingmeta.append((b'delta', len(revision.delta)))
- followingdata.append(revision.delta)
-
- if followingmeta:
- d[b'fieldsfollowing'] = followingmeta
-
- yield d
-
- for extra in followingdata:
- yield extra
-
-
-def makefilematcher(repo, pathfilter):
- """Construct a matcher from a path filter dict."""
-
- # Validate values.
- if pathfilter:
- for key in (b'include', b'exclude'):
- for pattern in pathfilter.get(key, []):
- if not pattern.startswith((b'path:', b'rootfilesin:')):
- raise error.WireprotoCommandError(
- b'%s pattern must begin with `path:` or `rootfilesin:`; '
- b'got %s',
- (key, pattern),
- )
-
- if pathfilter:
- matcher = matchmod.match(
- repo.root,
- b'',
- include=pathfilter.get(b'include', []),
- exclude=pathfilter.get(b'exclude', []),
- )
- else:
- matcher = matchmod.match(repo.root, b'')
-
- # Requested patterns could include files not in the local store. So
- # filter those out.
- return repo.narrowmatch(matcher)
-
-
-@wireprotocommand(
- b'filedata',
- args={
- b'haveparents': {
- b'type': b'bool',
- b'default': lambda: False,
- b'example': True,
- },
- b'nodes': {
- b'type': b'list',
- b'example': [b'0123456...'],
- },
- b'fields': {
- b'type': b'set',
- b'default': set,
- b'example': {b'parents', b'revision'},
- b'validvalues': {b'parents', b'revision', b'linknode'},
- },
- b'path': {
- b'type': b'bytes',
- b'example': b'foo.txt',
- },
- },
- permission=b'pull',
- # TODO censoring a file revision won't invalidate the cache.
- # Figure out a way to take censoring into account when deriving
- # the cache key.
- cachekeyfn=makecommandcachekeyfn(b'filedata', 1, allargs=True),
-)
-def filedata(repo, proto, haveparents, nodes, fields, path):
- # TODO this API allows access to file revisions that are attached to
- # secret changesets. filesdata does not have this problem. Maybe this
- # API should be deleted?
-
- try:
- # Extensions may wish to access the protocol handler.
- store = getfilestore(repo, proto, path)
- except FileAccessError as e:
- raise error.WireprotoCommandError(e.msg, e.args)
-
- clnode = repo.changelog.node
- linknodes = {}
-
- # Validate requested nodes.
- for node in nodes:
- try:
- store.rev(node)
- except error.LookupError:
- raise error.WireprotoCommandError(
- b'unknown file node: %s', (hex(node),)
- )
-
- # TODO by creating the filectx against a specific file revision
- # instead of changeset, linkrev() is always used. This is wrong for
- # cases where linkrev() may refer to a hidden changeset. But since this
- # API doesn't know anything about changesets, we're not sure how to
- # disambiguate the linknode. Perhaps we should delete this API?
- fctx = repo.filectx(path, fileid=node)
- linknodes[node] = clnode(fctx.introrev())
-
- revisions = store.emitrevisions(
- nodes,
- revisiondata=b'revision' in fields,
- assumehaveparentrevisions=haveparents,
- )
-
- yield {
- b'totalitems': len(nodes),
- }
-
- for o in emitfilerevisions(repo, path, revisions, linknodes, fields):
- yield o
-
-
-def filesdatacapabilities(repo, proto):
- batchsize = repo.ui.configint(
- b'experimental', b'server.filesdata.recommended-batch-size'
- )
- return {
- b'recommendedbatchsize': batchsize,
- }
-
-
-@wireprotocommand(
- b'filesdata',
- args={
- b'haveparents': {
- b'type': b'bool',
- b'default': lambda: False,
- b'example': True,
- },
- b'fields': {
- b'type': b'set',
- b'default': set,
- b'example': {b'parents', b'revision'},
- b'validvalues': {
- b'firstchangeset',
- b'linknode',
- b'parents',
- b'revision',
- },
- },
- b'pathfilter': {
- b'type': b'dict',
- b'default': lambda: None,
- b'example': {b'include': [b'path:tests']},
- },
- b'revisions': {
- b'type': b'list',
- b'example': [
- {
- b'type': b'changesetexplicit',
- b'nodes': [b'abcdef...'],
- }
- ],
- },
- },
- permission=b'pull',
- # TODO censoring a file revision won't invalidate the cache.
- # Figure out a way to take censoring into account when deriving
- # the cache key.
- cachekeyfn=makecommandcachekeyfn(b'filesdata', 1, allargs=True),
- extracapabilitiesfn=filesdatacapabilities,
-)
-def filesdata(repo, proto, haveparents, fields, pathfilter, revisions):
- # TODO This should operate on a repo that exposes obsolete changesets. There
- # is a race between a client making a push that obsoletes a changeset and
- # another client fetching files data for that changeset. If a client has a
- # changeset, it should probably be allowed to access files data for that
- # changeset.
-
- outgoing = resolvenodes(repo, revisions)
- filematcher = makefilematcher(repo, pathfilter)
-
- # path -> {fnode: linknode}
- fnodes = collections.defaultdict(dict)
-
- # We collect the set of relevant file revisions by iterating the changeset
- # revisions and either walking the set of files recorded in the changeset
- # or by walking the manifest at that revision. There is probably room for a
- # storage-level API to request this data, as it can be expensive to compute
- # and would benefit from caching or alternate storage from what revlogs
- # provide.
- for node in outgoing:
- ctx = repo[node]
- mctx = ctx.manifestctx()
- md = mctx.read()
-
- if haveparents:
- checkpaths = ctx.files()
- else:
- checkpaths = md.keys()
-
- for path in checkpaths:
- fnode = md[path]
-
- if path in fnodes and fnode in fnodes[path]:
- continue
-
- if not filematcher(path):
- continue
-
- fnodes[path].setdefault(fnode, node)
-
- yield {
- b'totalpaths': len(fnodes),
- b'totalitems': sum(len(v) for v in fnodes.values()),
- }
-
- for path, filenodes in sorted(fnodes.items()):
- try:
- store = getfilestore(repo, proto, path)
- except FileAccessError as e:
- raise error.WireprotoCommandError(e.msg, e.args)
-
- yield {
- b'path': path,
- b'totalitems': len(filenodes),
- }
-
- revisions = store.emitrevisions(
- filenodes.keys(),
- revisiondata=b'revision' in fields,
- assumehaveparentrevisions=haveparents,
- )
-
- for o in emitfilerevisions(repo, path, revisions, filenodes, fields):
- yield o
-
-
-@wireprotocommand(
- b'heads',
- args={
- b'publiconly': {
- b'type': b'bool',
- b'default': lambda: False,
- b'example': False,
- },
- },
- permission=b'pull',
-)
-def headsv2(repo, proto, publiconly):
- if publiconly:
- repo = repo.filtered(b'immutable')
-
- yield repo.heads()
-
-
-@wireprotocommand(
- b'known',
- args={
- b'nodes': {
- b'type': b'list',
- b'default': list,
- b'example': [b'deadbeef'],
- },
- },
- permission=b'pull',
-)
-def knownv2(repo, proto, nodes):
- result = b''.join(b'1' if n else b'0' for n in repo.known(nodes))
- yield result
-
-
-@wireprotocommand(
- b'listkeys',
- args={
- b'namespace': {
- b'type': b'bytes',
- b'example': b'ns',
- },
- },
- permission=b'pull',
-)
-def listkeysv2(repo, proto, namespace):
- keys = repo.listkeys(encoding.tolocal(namespace))
- keys = {
- encoding.fromlocal(k): encoding.fromlocal(v)
- for k, v in pycompat.iteritems(keys)
- }
-
- yield keys
-
-
-@wireprotocommand(
- b'lookup',
- args={
- b'key': {
- b'type': b'bytes',
- b'example': b'foo',
- },
- },
- permission=b'pull',
-)
-def lookupv2(repo, proto, key):
- key = encoding.tolocal(key)
-
- # TODO handle exception.
- node = repo.lookup(key)
-
- yield node
-
-
-def manifestdatacapabilities(repo, proto):
- batchsize = repo.ui.configint(
- b'experimental', b'server.manifestdata.recommended-batch-size'
- )
-
- return {
- b'recommendedbatchsize': batchsize,
- }
-
-
-@wireprotocommand(
- b'manifestdata',
- args={
- b'nodes': {
- b'type': b'list',
- b'example': [b'0123456...'],
- },
- b'haveparents': {
- b'type': b'bool',
- b'default': lambda: False,
- b'example': True,
- },
- b'fields': {
- b'type': b'set',
- b'default': set,
- b'example': {b'parents', b'revision'},
- b'validvalues': {b'parents', b'revision'},
- },
- b'tree': {
- b'type': b'bytes',
- b'example': b'',
- },
- },
- permission=b'pull',
- cachekeyfn=makecommandcachekeyfn(b'manifestdata', 1, allargs=True),
- extracapabilitiesfn=manifestdatacapabilities,
-)
-def manifestdata(repo, proto, haveparents, nodes, fields, tree):
- store = repo.manifestlog.getstorage(tree)
-
- # Validate the node is known and abort on unknown revisions.
- for node in nodes:
- try:
- store.rev(node)
- except error.LookupError:
- raise error.WireprotoCommandError(b'unknown node: %s', (node,))
-
- revisions = store.emitrevisions(
- nodes,
- revisiondata=b'revision' in fields,
- assumehaveparentrevisions=haveparents,
- )
-
- yield {
- b'totalitems': len(nodes),
- }
-
- for revision in revisions:
- d = {
- b'node': revision.node,
- }
-
- if b'parents' in fields:
- d[b'parents'] = [revision.p1node, revision.p2node]
-
- followingmeta = []
- followingdata = []
-
- if b'revision' in fields:
- if revision.revision is not None:
- followingmeta.append((b'revision', len(revision.revision)))
- followingdata.append(revision.revision)
- else:
- d[b'deltabasenode'] = revision.basenode
- followingmeta.append((b'delta', len(revision.delta)))
- followingdata.append(revision.delta)
-
- if followingmeta:
- d[b'fieldsfollowing'] = followingmeta
-
- yield d
-
- for extra in followingdata:
- yield extra
-
-
-@wireprotocommand(
- b'pushkey',
- args={
- b'namespace': {
- b'type': b'bytes',
- b'example': b'ns',
- },
- b'key': {
- b'type': b'bytes',
- b'example': b'key',
- },
- b'old': {
- b'type': b'bytes',
- b'example': b'old',
- },
- b'new': {
- b'type': b'bytes',
- b'example': b'new',
- },
- },
- permission=b'push',
-)
-def pushkeyv2(repo, proto, namespace, key, old, new):
- # TODO handle ui output redirection
- yield repo.pushkey(
- encoding.tolocal(namespace),
- encoding.tolocal(key),
- encoding.tolocal(old),
- encoding.tolocal(new),
- )
-
-
-@wireprotocommand(
- b'rawstorefiledata',
- args={
- b'files': {
- b'type': b'list',
- b'example': [b'changelog', b'manifestlog'],
- },
- b'pathfilter': {
- b'type': b'list',
- b'default': lambda: None,
- b'example': {b'include': [b'path:tests']},
- },
- },
- permission=b'pull',
-)
-def rawstorefiledata(repo, proto, files, pathfilter):
- if not streamclone.allowservergeneration(repo):
- raise error.WireprotoCommandError(b'stream clone is disabled')
-
- # TODO support dynamically advertising what store files "sets" are
- # available. For now, we support changelog, manifestlog, and files.
- files = set(files)
- allowedfiles = {b'changelog', b'manifestlog'}
-
- unsupported = files - allowedfiles
- if unsupported:
- raise error.WireprotoCommandError(
- b'unknown file type: %s', (b', '.join(sorted(unsupported)),)
- )
-
- with repo.lock():
- topfiles = list(repo.store.topfiles())
-
- sendfiles = []
- totalsize = 0
-
- # TODO this is a bunch of storage layer interface abstractions because
- # it assumes revlogs.
- for rl_type, name, size in topfiles:
- # XXX use the `rl_type` for that
- if b'changelog' in files and name.startswith(b'00changelog'):
- pass
- elif b'manifestlog' in files and name.startswith(b'00manifest'):
- pass
- else:
- continue
-
- sendfiles.append((b'store', name, size))
- totalsize += size
-
- yield {
- b'filecount': len(sendfiles),
- b'totalsize': totalsize,
- }
-
- for location, name, size in sendfiles:
- yield {
- b'location': location,
- b'path': name,
- b'size': size,
- }
-
- # We have to use a closure for this to ensure the context manager is
- # closed only after sending the final chunk.
- def getfiledata():
- with repo.svfs(name, b'rb', auditpath=False) as fh:
- for chunk in util.filechunkiter(fh, limit=size):
- yield chunk
-
- yield wireprototypes.indefinitebytestringresponse(getfiledata())
--- a/relnotes/next Fri Feb 18 12:55:39 2022 +0100
+++ b/relnotes/next Fri Feb 18 14:27:43 2022 +0100
@@ -4,16 +4,42 @@
== Default Format Change ==
These changes affects newly created repositories (or new clone) done with
-Mercurial XXX.
+Mercurial 6.1.
+
+The `share-safe` format variant is now enabled by default. It makes
+configuration and requirements more consistent across repository and their
+shares. This introduces a behavior change as shares from a repository using the
+new format will also use their main repository's configuration.
+
+See `hg help config.format.use-share-safe` for details about the feature and
+the available options for auto-upgrading existing shares.
== New Experimental Features ==
== Bug Fixes ==
+The `--no-check` and `--no-merge` flags now properly overwrite the behavior from `commands.update.check`.
== Backwards Compatibility Changes ==
+The remotefilelog extension now requires an appropriate excludepattern
+for subrepositories.
+
+The labels passed to merge tools have changed slightly. Merge tools can get
+labels passed to them if you include `$labellocal`, `$labelbase`, and/or
+`$labelother` in the `merge-tool.<tool name>.args` configuration. These labels
+used to have some space-padding, and truncation to fit within 72 columns. Both
+the padding and the truncation has been removed.
+
+Some of the text in labels passed to merge tools has changed. For example,
+in conflicts while running `hg histedit`, the labels used to be "local",
+"base", and "histedit". They are now "already edited",
+"parent of current change", and "current change", respectively.
+
+The use of `share-safe` means shares (of new repositories) will also use their
+main repository's configuration; see the `Default Format Change` section
+for details.
== Internal API Changes ==
--- a/rust/Cargo.lock Fri Feb 18 12:55:39 2022 +0100
+++ b/rust/Cargo.lock Fri Feb 18 14:27:43 2022 +0100
@@ -1,5 +1,7 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
+version = 3
+
[[package]]
name = "adler"
version = "0.2.3"
@@ -314,21 +316,19 @@
[[package]]
name = "format-bytes"
-version = "0.2.2"
+version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1c4e89040c7fd7b4e6ba2820ac705a45def8a0c098ec78d170ae88f1ef1d5762"
+checksum = "48942366ef93975da38e175ac9e10068c6fc08ca9e85930d4f098f4d5b14c2fd"
dependencies = [
"format-bytes-macros",
- "proc-macro-hack",
]
[[package]]
name = "format-bytes-macros"
-version = "0.3.0"
+version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b05089e341a0460449e2210c3bf7b61597860b07f0deae58da38dbed0a4c6b6d"
+checksum = "203aadebefcc73d12038296c228eabf830f99cba991b0032adf20e9fa6ce7e4f"
dependencies = [
- "proc-macro-hack",
"proc-macro2",
"quote",
"syn",
@@ -356,6 +356,17 @@
]
[[package]]
+name = "getrandom"
+version = "0.2.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "418d37c8b1d42553c93648be529cb70f920d3baf8ef469b74b9638df426e0b4c"
+dependencies = [
+ "cfg-if 1.0.0",
+ "libc",
+ "wasi 0.10.0+wasi-snapshot-preview1",
+]
+
+[[package]]
name = "glob"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -371,6 +382,12 @@
]
[[package]]
+name = "hex"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"
+
+[[package]]
name = "hg-core"
version = "0.1.0"
dependencies = [
@@ -391,7 +408,7 @@
"memmap2",
"micro-timer",
"pretty_assertions",
- "rand",
+ "rand 0.8.4",
"rand_distr",
"rand_pcg",
"rayon",
@@ -415,6 +432,7 @@
"libc",
"log",
"stable_deref_trait",
+ "vcsgraph",
]
[[package]]
@@ -442,7 +460,7 @@
checksum = "3ca8957e71f04a205cb162508f9326aea04676c8dfd0711220190d6b83664f3f"
dependencies = [
"bitmaps",
- "rand_core",
+ "rand_core 0.5.1",
"rand_xoshiro",
"sized-chunks",
"typenum",
@@ -480,6 +498,12 @@
checksum = "1482821306169ec4d07f6aca392a4681f66c75c9918aa49641a2595db64053cb"
[[package]]
+name = "libm"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c7d73b3f436185384286bd8098d17ec07c9a7d2388a6599f824d8502b529702a"
+
+[[package]]
name = "libz-sys"
version = "1.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -579,6 +603,7 @@
checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290"
dependencies = [
"autocfg",
+ "libm",
]
[[package]]
@@ -637,12 +662,6 @@
]
[[package]]
-name = "proc-macro-hack"
-version = "0.5.19"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5"
-
-[[package]]
name = "proc-macro2"
version = "1.0.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -692,11 +711,23 @@
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03"
dependencies = [
- "getrandom",
+ "getrandom 0.1.15",
"libc",
- "rand_chacha",
- "rand_core",
- "rand_hc",
+ "rand_chacha 0.2.2",
+ "rand_core 0.5.1",
+ "rand_hc 0.2.0",
+]
+
+[[package]]
+name = "rand"
+version = "0.8.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8"
+dependencies = [
+ "libc",
+ "rand_chacha 0.3.1",
+ "rand_core 0.6.3",
+ "rand_hc 0.3.1",
]
[[package]]
@@ -706,7 +737,17 @@
checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402"
dependencies = [
"ppv-lite86",
- "rand_core",
+ "rand_core 0.5.1",
+]
+
+[[package]]
+name = "rand_chacha"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
+dependencies = [
+ "ppv-lite86",
+ "rand_core 0.6.3",
]
[[package]]
@@ -715,16 +756,26 @@
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19"
dependencies = [
- "getrandom",
+ "getrandom 0.1.15",
+]
+
+[[package]]
+name = "rand_core"
+version = "0.6.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7"
+dependencies = [
+ "getrandom 0.2.4",
]
[[package]]
name = "rand_distr"
-version = "0.2.2"
+version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "96977acbdd3a6576fb1d27391900035bf3863d4a16422973a409b488cf29ffb2"
+checksum = "964d548f8e7d12e102ef183a0de7e98180c9f8729f555897a857b96e48122d2f"
dependencies = [
- "rand",
+ "num-traits",
+ "rand 0.8.4",
]
[[package]]
@@ -733,16 +784,25 @@
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c"
dependencies = [
- "rand_core",
+ "rand_core 0.5.1",
+]
+
+[[package]]
+name = "rand_hc"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7"
+dependencies = [
+ "rand_core 0.6.3",
]
[[package]]
name = "rand_pcg"
-version = "0.2.1"
+version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "16abd0c1b639e9eb4d7c50c0b8100b0d0f849be2349829c740fe8e6eb4816429"
+checksum = "59cad018caf63deb318e5a4586d99a24424a364f40f1e5778c29aca23f4fc73e"
dependencies = [
- "rand_core",
+ "rand_core 0.6.3",
]
[[package]]
@@ -751,7 +811,7 @@
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a9fcdd2e881d02f1d9390ae47ad8e5696a9e4be7b547a1da2afbc61973217004"
dependencies = [
- "rand_core",
+ "rand_core 0.5.1",
]
[[package]]
@@ -816,6 +876,7 @@
name = "rhg"
version = "0.1.0"
dependencies = [
+ "atty",
"chrono",
"clap",
"derive_more",
@@ -905,7 +966,7 @@
dependencies = [
"cfg-if 0.1.10",
"libc",
- "rand",
+ "rand 0.7.3",
"redox_syscall",
"remove_dir_all",
"winapi",
@@ -956,7 +1017,7 @@
checksum = "04f8ab788026715fa63b31960869617cba39117e520eb415b0139543e325ab59"
dependencies = [
"cfg-if 0.1.10",
- "rand",
+ "rand 0.7.3",
"static_assertions",
]
@@ -995,6 +1056,17 @@
checksum = "b00bca6106a5e23f3eee943593759b7fcddb00554332e856d990c893966879fb"
[[package]]
+name = "vcsgraph"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4cb68c231e2575f7503a7c19213875f9d4ec2e84e963a56ce3de4b6bee351ef7"
+dependencies = [
+ "hex",
+ "rand 0.7.3",
+ "sha-1",
+]
+
+[[package]]
name = "vec_map"
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
--- a/rust/hg-core/Cargo.toml Fri Feb 18 12:55:39 2022 +0100
+++ b/rust/hg-core/Cargo.toml Fri Feb 18 14:27:43 2022 +0100
@@ -18,9 +18,9 @@
itertools = "0.9"
lazy_static = "1.4.0"
libc = "0.2"
-rand = "0.7.3"
-rand_pcg = "0.2.1"
-rand_distr = "0.2.2"
+rand = "0.8.4"
+rand_pcg = "0.3.1"
+rand_distr = "0.4.2"
rayon = "1.3.0"
regex = "1.3.9"
sha-1 = "0.9.6"
@@ -33,7 +33,7 @@
log = "0.4.8"
memmap2 = {version = "0.4", features = ["stable_deref_trait"]}
zstd = "0.5.3"
-format-bytes = "0.2.2"
+format-bytes = "0.3.0"
# We don't use the `miniz-oxide` backend to not change rhg benchmarks and until
# we have a clearer view of which backend is the fastest.
--- a/rust/hg-core/src/ancestors.rs Fri Feb 18 12:55:39 2022 +0100
+++ b/rust/hg-core/src/ancestors.rs Fri Feb 18 14:27:43 2022 +0100
@@ -26,15 +26,6 @@
stoprev: Revision,
}
-/// Lazy ancestors set, backed by AncestorsIterator
-pub struct LazyAncestors<G: Graph + Clone> {
- graph: G,
- containsiter: AncestorsIterator<G>,
- initrevs: Vec<Revision>,
- stoprev: Revision,
- inclusive: bool,
-}
-
pub struct MissingAncestors<G: Graph> {
graph: G,
bases: HashSet<Revision>,
@@ -165,49 +156,6 @@
}
}
-impl<G: Graph + Clone> LazyAncestors<G> {
- pub fn new(
- graph: G,
- initrevs: impl IntoIterator<Item = Revision>,
- stoprev: Revision,
- inclusive: bool,
- ) -> Result<Self, GraphError> {
- let v: Vec<Revision> = initrevs.into_iter().collect();
- Ok(LazyAncestors {
- graph: graph.clone(),
- containsiter: AncestorsIterator::new(
- graph,
- v.iter().cloned(),
- stoprev,
- inclusive,
- )?,
- initrevs: v,
- stoprev,
- inclusive,
- })
- }
-
- pub fn contains(&mut self, rev: Revision) -> Result<bool, GraphError> {
- self.containsiter.contains(rev)
- }
-
- pub fn is_empty(&self) -> bool {
- self.containsiter.is_empty()
- }
-
- pub fn iter(&self) -> AncestorsIterator<G> {
- // the arguments being the same as for self.containsiter, we know
- // for sure that AncestorsIterator constructor can't fail
- AncestorsIterator::new(
- self.graph.clone(),
- self.initrevs.iter().cloned(),
- self.stoprev,
- self.inclusive,
- )
- .unwrap()
- }
-}
-
impl<G: Graph> MissingAncestors<G> {
pub fn new(graph: G, bases: impl IntoIterator<Item = Revision>) -> Self {
let mut created = MissingAncestors {
@@ -550,39 +498,6 @@
}
#[test]
- fn test_lazy_iter_contains() {
- let mut lazy =
- LazyAncestors::new(SampleGraph, vec![11, 13], 0, false).unwrap();
-
- let revs: Vec<Revision> = lazy.iter().map(|r| r.unwrap()).collect();
- // compare with iterator tests on the same initial revisions
- assert_eq!(revs, vec![8, 7, 4, 3, 2, 1, 0]);
-
- // contains() results are correct, unaffected by the fact that
- // we consumed entirely an iterator out of lazy
- assert_eq!(lazy.contains(2), Ok(true));
- assert_eq!(lazy.contains(9), Ok(false));
- }
-
- #[test]
- fn test_lazy_contains_iter() {
- let mut lazy =
- LazyAncestors::new(SampleGraph, vec![11, 13], 0, false).unwrap(); // reminder: [8, 7, 4, 3, 2, 1, 0]
-
- assert_eq!(lazy.contains(2), Ok(true));
- assert_eq!(lazy.contains(6), Ok(false));
-
- // after consumption of 2 by the inner iterator, results stay
- // consistent
- assert_eq!(lazy.contains(2), Ok(true));
- assert_eq!(lazy.contains(5), Ok(false));
-
- // iter() still gives us a fresh iterator
- let revs: Vec<Revision> = lazy.iter().map(|r| r.unwrap()).collect();
- assert_eq!(revs, vec![8, 7, 4, 3, 2, 1, 0]);
- }
-
- #[test]
/// Test constructor, add/get bases and heads
fn test_missing_bases() -> Result<(), GraphError> {
let mut missing_ancestors =
--- a/rust/hg-core/src/config.rs Fri Feb 18 12:55:39 2022 +0100
+++ b/rust/hg-core/src/config.rs Fri Feb 18 14:27:43 2022 +0100
@@ -13,4 +13,4 @@
mod layer;
mod values;
pub use config::{Config, ConfigSource, ConfigValueParseError};
-pub use layer::{ConfigError, ConfigParseError};
+pub use layer::{ConfigError, ConfigOrigin, ConfigParseError};
--- a/rust/hg-core/src/config/config.rs Fri Feb 18 12:55:39 2022 +0100
+++ b/rust/hg-core/src/config/config.rs Fri Feb 18 14:27:43 2022 +0100
@@ -84,6 +84,11 @@
}
impl Config {
+ /// The configuration to use when printing configuration-loading errors
+ pub fn empty() -> Self {
+ Self { layers: Vec::new() }
+ }
+
/// Load system and user configuration from various files.
///
/// This is also affected by some environment variables.
@@ -133,13 +138,19 @@
Ok(config)
}
- pub fn load_cli_args_config(
+ pub fn load_cli_args(
&mut self,
cli_config_args: impl IntoIterator<Item = impl AsRef<[u8]>>,
+ color_arg: Option<Vec<u8>>,
) -> Result<(), ConfigError> {
if let Some(layer) = ConfigLayer::parse_cli_args(cli_config_args)? {
self.layers.push(layer)
}
+ if let Some(arg) = color_arg {
+ let mut layer = ConfigLayer::new(ConfigOrigin::CommandLineColor);
+ layer.add(b"ui"[..].into(), b"color"[..].into(), arg, None);
+ self.layers.push(layer)
+ }
Ok(())
}
@@ -361,6 +372,15 @@
Ok(self.get_option(section, item)?.unwrap_or(false))
}
+ /// Returns `true` if the extension is enabled, `false` otherwise
+ pub fn is_extension_enabled(&self, extension: &[u8]) -> bool {
+ let value = self.get(b"extensions", extension);
+ match value {
+ Some(c) => !c.starts_with(b"!"),
+ None => false,
+ }
+ }
+
/// If there is an `item` value in `section`, parse and return a list of
/// byte strings.
pub fn get_list(
@@ -377,6 +397,16 @@
.map(|(_, value)| value.bytes.as_ref())
}
+ /// Returns the raw value bytes of the first one found, or `None`.
+ pub fn get_with_origin(
+ &self,
+ section: &[u8],
+ item: &[u8],
+ ) -> Option<(&[u8], &ConfigOrigin)> {
+ self.get_inner(section, item)
+ .map(|(layer, value)| (value.bytes.as_ref(), &layer.origin))
+ }
+
/// Returns the layer and the value of the first one found, or `None`.
fn get_inner(
&self,
@@ -402,6 +432,66 @@
.collect()
}
+ /// Returns whether any key is defined in the given section
+ pub fn has_non_empty_section(&self, section: &[u8]) -> bool {
+ self.layers
+ .iter()
+ .any(|layer| layer.has_non_empty_section(section))
+ }
+
+ /// Yields (key, value) pairs for everything in the given section
+ pub fn iter_section<'a>(
+ &'a self,
+ section: &'a [u8],
+ ) -> impl Iterator<Item = (&[u8], &[u8])> + 'a {
+ // TODO: Use `Iterator`’s `.peekable()` when its `peek_mut` is
+ // available:
+ // https://doc.rust-lang.org/nightly/std/iter/struct.Peekable.html#method.peek_mut
+ struct Peekable<I: Iterator> {
+ iter: I,
+ /// Remember a peeked value, even if it was None.
+ peeked: Option<Option<I::Item>>,
+ }
+
+ impl<I: Iterator> Peekable<I> {
+ fn new(iter: I) -> Self {
+ Self { iter, peeked: None }
+ }
+
+ fn next(&mut self) {
+ self.peeked = None
+ }
+
+ fn peek_mut(&mut self) -> Option<&mut I::Item> {
+ let iter = &mut self.iter;
+ self.peeked.get_or_insert_with(|| iter.next()).as_mut()
+ }
+ }
+
+ // Deduplicate keys redefined in multiple layers
+ let mut keys_already_seen = HashSet::new();
+ let mut key_is_new =
+ move |&(key, _value): &(&'a [u8], &'a [u8])| -> bool {
+ keys_already_seen.insert(key)
+ };
+ // This is similar to `flat_map` + `filter_map`, except with a single
+ // closure that owns `key_is_new` (and therefore the
+ // `keys_already_seen` set):
+ let mut layer_iters = Peekable::new(
+ self.layers
+ .iter()
+ .rev()
+ .map(move |layer| layer.iter_section(section)),
+ );
+ std::iter::from_fn(move || loop {
+ if let Some(pair) = layer_iters.peek_mut()?.find(&mut key_is_new) {
+ return Some(pair);
+ } else {
+ layer_iters.next();
+ }
+ })
+ }
+
/// Get raw values bytes from all layers (even untrusted ones) in order
/// of precedence.
#[cfg(test)]
--- a/rust/hg-core/src/config/layer.rs Fri Feb 18 12:55:39 2022 +0100
+++ b/rust/hg-core/src/config/layer.rs Fri Feb 18 14:27:43 2022 +0100
@@ -127,6 +127,24 @@
.flat_map(|section| section.keys().map(|vec| &**vec))
}
+ /// Returns the (key, value) pairs defined in the given section
+ pub fn iter_section<'layer>(
+ &'layer self,
+ section: &[u8],
+ ) -> impl Iterator<Item = (&'layer [u8], &'layer [u8])> {
+ self.sections
+ .get(section)
+ .into_iter()
+ .flat_map(|section| section.iter().map(|(k, v)| (&**k, &*v.bytes)))
+ }
+
+ /// Returns whether any key is defined in the given section
+ pub fn has_non_empty_section(&self, section: &[u8]) -> bool {
+ self.sections
+ .get(section)
+ .map_or(false, |section| !section.is_empty())
+ }
+
pub fn is_empty(&self) -> bool {
self.sections.is_empty()
}
@@ -277,16 +295,17 @@
pub line: Option<usize>,
}
-#[derive(Clone, Debug)]
+#[derive(Clone, Debug, PartialEq, Eq)]
pub enum ConfigOrigin {
/// From a configuration file
File(PathBuf),
/// From a `--config` CLI argument
CommandLine,
+ /// From a `--color` CLI argument
+ CommandLineColor,
/// From environment variables like `$PAGER` or `$EDITOR`
Environment(Vec<u8>),
- /* TODO cli
- * TODO defaults (configitems.py)
+ /* TODO defaults (configitems.py)
* TODO extensions
* TODO Python resources?
* Others? */
@@ -300,6 +319,7 @@
match self {
ConfigOrigin::File(p) => out.write_all(&get_bytes_from_path(p)),
ConfigOrigin::CommandLine => out.write_all(b"--config"),
+ ConfigOrigin::CommandLineColor => out.write_all(b"--color"),
ConfigOrigin::Environment(e) => write_bytes!(out, b"${}", e),
}
}
--- a/rust/hg-core/src/dirstate/entry.rs Fri Feb 18 12:55:39 2022 +0100
+++ b/rust/hg-core/src/dirstate/entry.rs Fri Feb 18 14:27:43 2022 +0100
@@ -43,6 +43,10 @@
truncated_seconds: u32,
/// Always in the `0 .. 1_000_000_000` range.
nanoseconds: u32,
+ /// TODO this should be in DirstateEntry, but the current code needs
+ /// refactoring to use DirstateEntry instead of TruncatedTimestamp for
+ /// comparison.
+ pub second_ambiguous: bool,
}
impl TruncatedTimestamp {
@@ -50,11 +54,16 @@
/// and truncate the seconds components to its lower 31 bits.
///
/// Panics if the nanoseconds components is not in the expected range.
- pub fn new_truncate(seconds: i64, nanoseconds: u32) -> Self {
+ pub fn new_truncate(
+ seconds: i64,
+ nanoseconds: u32,
+ second_ambiguous: bool,
+ ) -> Self {
assert!(nanoseconds < NSEC_PER_SEC);
Self {
truncated_seconds: seconds as u32 & RANGE_MASK_31BIT,
nanoseconds,
+ second_ambiguous,
}
}
@@ -63,6 +72,7 @@
pub fn from_already_truncated(
truncated_seconds: u32,
nanoseconds: u32,
+ second_ambiguous: bool,
) -> Result<Self, DirstateV2ParseError> {
if truncated_seconds & !RANGE_MASK_31BIT == 0
&& nanoseconds < NSEC_PER_SEC
@@ -70,12 +80,17 @@
Ok(Self {
truncated_seconds,
nanoseconds,
+ second_ambiguous,
})
} else {
Err(DirstateV2ParseError)
}
}
+ /// Returns a `TruncatedTimestamp` for the modification time of `metadata`.
+ ///
+ /// Propagates errors from `std` on platforms where modification time
+ /// is not available at all.
pub fn for_mtime_of(metadata: &fs::Metadata) -> io::Result<Self> {
#[cfg(unix)]
{
@@ -83,7 +98,7 @@
let seconds = metadata.mtime();
// i64 -> u32 with value always in the `0 .. NSEC_PER_SEC` range
let nanoseconds = metadata.mtime_nsec().try_into().unwrap();
- Ok(Self::new_truncate(seconds, nanoseconds))
+ Ok(Self::new_truncate(seconds, nanoseconds, false))
}
#[cfg(not(unix))]
{
@@ -91,6 +106,47 @@
}
}
+ /// Like `for_mtime_of`, but may return `None` or a value with
+ /// `second_ambiguous` set if the mtime is not "reliable".
+ ///
+ /// A modification time is reliable if it is older than `boundary` (or
+ /// sufficiently in the future).
+ ///
+ /// Otherwise a concurrent modification might happen with the same mtime.
+ pub fn for_reliable_mtime_of(
+ metadata: &fs::Metadata,
+ boundary: &Self,
+ ) -> io::Result<Option<Self>> {
+ let mut mtime = Self::for_mtime_of(metadata)?;
+ // If the mtime of the ambiguous file is younger (or equal) to the
+ // starting point of the `status` walk, we cannot guarantee that
+ // another, racy, write will not happen right after with the same mtime
+ // and we cannot cache the information.
+ //
+ // However if the mtime is far away in the future, this is likely some
+ // mismatch between the current clock and previous file system
+ // operation. So mtime more than one days in the future are considered
+ // fine.
+ let reliable = if mtime.truncated_seconds == boundary.truncated_seconds
+ {
+ mtime.second_ambiguous = true;
+ mtime.nanoseconds != 0
+ && boundary.nanoseconds != 0
+ && mtime.nanoseconds < boundary.nanoseconds
+ } else {
+ // `truncated_seconds` is less than 2**31,
+ // so this does not overflow `u32`:
+ let one_day_later = boundary.truncated_seconds + 24 * 3600;
+ mtime.truncated_seconds < boundary.truncated_seconds
+ || mtime.truncated_seconds > one_day_later
+ };
+ if reliable {
+ Ok(Some(mtime))
+ } else {
+ Ok(None)
+ }
+ }
+
/// The lower 31 bits of the number of seconds since the epoch.
pub fn truncated_seconds(&self) -> u32 {
self.truncated_seconds
@@ -122,10 +178,17 @@
/// in that way, doing a simple comparison would cause many false
/// negatives.
pub fn likely_equal(self, other: Self) -> bool {
- self.truncated_seconds == other.truncated_seconds
- && (self.nanoseconds == other.nanoseconds
- || self.nanoseconds == 0
- || other.nanoseconds == 0)
+ if self.truncated_seconds != other.truncated_seconds {
+ false
+ } else if self.nanoseconds == 0 || other.nanoseconds == 0 {
+ if self.second_ambiguous {
+ false
+ } else {
+ true
+ }
+ } else {
+ self.nanoseconds == other.nanoseconds
+ }
}
pub fn likely_equal_to_mtime_of(
@@ -168,12 +231,12 @@
}
}
};
- Self::new_truncate(seconds, nanoseconds)
+ Self::new_truncate(seconds, nanoseconds, false)
}
}
const NSEC_PER_SEC: u32 = 1_000_000_000;
-const RANGE_MASK_31BIT: u32 = 0x7FFF_FFFF;
+pub const RANGE_MASK_31BIT: u32 = 0x7FFF_FFFF;
pub const MTIME_UNSET: i32 = -1;
@@ -258,9 +321,10 @@
let mode = u32::try_from(mode).unwrap();
let size = u32::try_from(size).unwrap();
let mtime = u32::try_from(mtime).unwrap();
- let mtime =
- TruncatedTimestamp::from_already_truncated(mtime, 0)
- .unwrap();
+ let mtime = TruncatedTimestamp::from_already_truncated(
+ mtime, 0, false,
+ )
+ .unwrap();
Self {
flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED,
mode_size: Some((mode, size)),
@@ -438,7 +502,11 @@
} else if !self.flags.contains(Flags::P1_TRACKED) {
MTIME_UNSET
} else if let Some(mtime) = self.mtime {
- i32::try_from(mtime.truncated_seconds()).unwrap()
+ if mtime.second_ambiguous {
+ MTIME_UNSET
+ } else {
+ i32::try_from(mtime.truncated_seconds()).unwrap()
+ }
} else {
MTIME_UNSET
}
@@ -580,10 +648,8 @@
&self,
filesystem_metadata: &std::fs::Metadata,
) -> bool {
- use std::os::unix::fs::MetadataExt;
- const EXEC_BIT_MASK: u32 = 0o100;
- let dirstate_exec_bit = (self.mode() as u32) & EXEC_BIT_MASK;
- let fs_exec_bit = filesystem_metadata.mode() & EXEC_BIT_MASK;
+ let dirstate_exec_bit = (self.mode() as u32 & EXEC_BIT_MASK) != 0;
+ let fs_exec_bit = has_exec_bit(filesystem_metadata);
dirstate_exec_bit != fs_exec_bit
}
@@ -592,16 +658,6 @@
pub fn debug_tuple(&self) -> (u8, i32, i32, i32) {
(self.state().into(), self.mode(), self.size(), self.mtime())
}
-
- /// True if the stored mtime would be ambiguous with the current time
- pub fn need_delay(&self, now: TruncatedTimestamp) -> bool {
- if let Some(mtime) = self.mtime {
- self.state() == EntryState::Normal
- && mtime.truncated_seconds() == now.truncated_seconds()
- } else {
- false
- }
- }
}
impl EntryState {
@@ -641,3 +697,11 @@
}
}
}
+
+const EXEC_BIT_MASK: u32 = 0o100;
+
+pub fn has_exec_bit(metadata: &std::fs::Metadata) -> bool {
+ // TODO: How to handle executable permissions on Windows?
+ use std::os::unix::fs::MetadataExt;
+ (metadata.mode() & EXEC_BIT_MASK) != 0
+}
--- a/rust/hg-core/src/dirstate/status.rs Fri Feb 18 12:55:39 2022 +0100
+++ b/rust/hg-core/src/dirstate/status.rs Fri Feb 18 14:27:43 2022 +0100
@@ -9,10 +9,9 @@
//! It is currently missing a lot of functionality compared to the Python one
//! and will only be triggered in narrow cases.
+use crate::dirstate::entry::TruncatedTimestamp;
use crate::dirstate_tree::on_disk::DirstateV2ParseError;
-
use crate::{
- dirstate::TruncatedTimestamp,
utils::hg_path::{HgPath, HgPathError},
PatternError,
};
@@ -62,46 +61,48 @@
#[derive(Debug, Copy, Clone)]
pub struct StatusOptions {
- /// Remember the most recent modification timeslot for status, to make
- /// sure we won't miss future size-preserving file content modifications
- /// that happen within the same timeslot.
- pub last_normal_time: TruncatedTimestamp,
/// Whether we are on a filesystem with UNIX-like exec flags
pub check_exec: bool,
pub list_clean: bool,
pub list_unknown: bool,
pub list_ignored: bool,
+ /// Whether to populate `StatusPath::copy_source`
+ pub list_copies: bool,
/// Whether to collect traversed dirs for applying a callback later.
/// Used by `hg purge` for example.
pub collect_traversed_dirs: bool,
}
-#[derive(Debug, Default)]
+#[derive(Default)]
pub struct DirstateStatus<'a> {
+ /// The current time at the start of the `status()` algorithm, as measured
+ /// and possibly truncated by the filesystem.
+ pub filesystem_time_at_status_start: Option<TruncatedTimestamp>,
+
/// Tracked files whose contents have changed since the parent revision
- pub modified: Vec<HgPathCow<'a>>,
+ pub modified: Vec<StatusPath<'a>>,
/// Newly-tracked files that were not present in the parent
- pub added: Vec<HgPathCow<'a>>,
+ pub added: Vec<StatusPath<'a>>,
/// Previously-tracked files that have been (re)moved with an hg command
- pub removed: Vec<HgPathCow<'a>>,
+ pub removed: Vec<StatusPath<'a>>,
/// (Still) tracked files that are missing, (re)moved with an non-hg
/// command
- pub deleted: Vec<HgPathCow<'a>>,
+ pub deleted: Vec<StatusPath<'a>>,
/// Tracked files that are up to date with the parent.
/// Only pupulated if `StatusOptions::list_clean` is true.
- pub clean: Vec<HgPathCow<'a>>,
+ pub clean: Vec<StatusPath<'a>>,
/// Files in the working directory that are ignored with `.hgignore`.
/// Only pupulated if `StatusOptions::list_ignored` is true.
- pub ignored: Vec<HgPathCow<'a>>,
+ pub ignored: Vec<StatusPath<'a>>,
/// Files in the working directory that are neither tracked nor ignored.
/// Only pupulated if `StatusOptions::list_unknown` is true.
- pub unknown: Vec<HgPathCow<'a>>,
+ pub unknown: Vec<StatusPath<'a>>,
/// Was explicitly matched but cannot be found/accessed
pub bad: Vec<(HgPathCow<'a>, BadMatch)>,
@@ -109,7 +110,7 @@
/// Either clean or modified, but we can’t tell from filesystem metadata
/// alone. The file contents need to be read and compared with that in
/// the parent.
- pub unsure: Vec<HgPathCow<'a>>,
+ pub unsure: Vec<StatusPath<'a>>,
/// Only filled if `collect_traversed_dirs` is `true`
pub traversed: Vec<HgPathCow<'a>>,
@@ -119,10 +120,14 @@
pub dirty: bool,
}
+#[derive(Clone, PartialEq, Eq, PartialOrd, Ord)]
+pub struct StatusPath<'a> {
+ pub path: HgPathCow<'a>,
+ pub copy_source: Option<HgPathCow<'a>>,
+}
+
#[derive(Debug, derive_more::From)]
pub enum StatusError {
- /// Generic IO error
- IO(std::io::Error),
/// An invalid path that cannot be represented in Mercurial was found
Path(HgPathError),
/// An invalid "ignore" pattern was found
@@ -136,7 +141,6 @@
impl fmt::Display for StatusError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
- StatusError::IO(error) => error.fmt(f),
StatusError::Path(error) => error.fmt(f),
StatusError::Pattern(error) => error.fmt(f),
StatusError::DirstateV2ParseError(_) => {
--- a/rust/hg-core/src/dirstate_tree/dirstate_map.rs Fri Feb 18 12:55:39 2022 +0100
+++ b/rust/hg-core/src/dirstate_tree/dirstate_map.rs Fri Feb 18 14:27:43 2022 +0100
@@ -309,6 +309,25 @@
NodeRef::OnDisk(node) => node.copy_source(on_disk),
}
}
+ /// Returns a `BorrowedPath`, which can be turned into a `Cow<'on_disk,
+ /// HgPath>` detached from `'tree`
+ pub(super) fn copy_source_borrowed(
+ &self,
+ on_disk: &'on_disk [u8],
+ ) -> Result<Option<BorrowedPath<'tree, 'on_disk>>, DirstateV2ParseError>
+ {
+ Ok(match self {
+ NodeRef::InMemory(_path, node) => {
+ node.copy_source.as_ref().map(|source| match source {
+ Cow::Borrowed(on_disk) => BorrowedPath::OnDisk(on_disk),
+ Cow::Owned(in_memory) => BorrowedPath::InMemory(in_memory),
+ })
+ }
+ NodeRef::OnDisk(node) => node
+ .copy_source(on_disk)?
+ .map(|source| BorrowedPath::OnDisk(source)),
+ })
+ }
pub(super) fn entry(
&self,
@@ -677,25 +696,6 @@
})
}
- fn clear_known_ambiguous_mtimes(
- &mut self,
- paths: &[impl AsRef<HgPath>],
- ) -> Result<(), DirstateV2ParseError> {
- for path in paths {
- if let Some(node) = Self::get_node_mut(
- self.on_disk,
- &mut self.unreachable_bytes,
- &mut self.root,
- path.as_ref(),
- )? {
- if let NodeData::Entry(entry) = &mut node.data {
- entry.set_possibly_dirty();
- }
- }
- }
- Ok(())
- }
-
fn count_dropped_path(unreachable_bytes: &mut u32, path: &Cow<HgPath>) {
if let Cow::Borrowed(path) = path {
*unreachable_bytes += path.len() as u32
@@ -928,31 +928,22 @@
#[timed]
pub fn pack_v1(
- &mut self,
+ &self,
parents: DirstateParents,
- now: TruncatedTimestamp,
) -> Result<Vec<u8>, DirstateError> {
- let map = self.get_map_mut();
- let mut ambiguous_mtimes = Vec::new();
+ let map = self.get_map();
// Optizimation (to be measured?): pre-compute size to avoid `Vec`
// reallocations
let mut size = parents.as_bytes().len();
for node in map.iter_nodes() {
let node = node?;
- if let Some(entry) = node.entry()? {
+ if node.entry()?.is_some() {
size += packed_entry_size(
node.full_path(map.on_disk)?,
node.copy_source(map.on_disk)?,
);
- if entry.need_delay(now) {
- ambiguous_mtimes.push(
- node.full_path_borrowed(map.on_disk)?
- .detach_from_tree(),
- )
- }
}
}
- map.clear_known_ambiguous_mtimes(&ambiguous_mtimes)?;
let mut packed = Vec::with_capacity(size);
packed.extend(parents.as_bytes());
@@ -977,27 +968,10 @@
/// (false).
#[timed]
pub fn pack_v2(
- &mut self,
- now: TruncatedTimestamp,
+ &self,
can_append: bool,
- ) -> Result<(Vec<u8>, Vec<u8>, bool), DirstateError> {
- let map = self.get_map_mut();
- let mut paths = Vec::new();
- for node in map.iter_nodes() {
- let node = node?;
- if let Some(entry) = node.entry()? {
- if entry.need_delay(now) {
- paths.push(
- node.full_path_borrowed(map.on_disk)?
- .detach_from_tree(),
- )
- }
- }
- }
- // Borrow of `self` ends here since we collect cloned paths
-
- map.clear_known_ambiguous_mtimes(&paths)?;
-
+ ) -> Result<(Vec<u8>, on_disk::TreeMetadata, bool), DirstateError> {
+ let map = self.get_map();
on_disk::write(map, can_append)
}
--- a/rust/hg-core/src/dirstate_tree/on_disk.rs Fri Feb 18 12:55:39 2022 +0100
+++ b/rust/hg-core/src/dirstate_tree/on_disk.rs Fri Feb 18 14:27:43 2022 +0100
@@ -14,8 +14,10 @@
use bytes_cast::unaligned::{U16Be, U32Be};
use bytes_cast::BytesCast;
use format_bytes::format_bytes;
+use rand::Rng;
use std::borrow::Cow;
use std::convert::{TryFrom, TryInto};
+use std::fmt::Write;
/// Added at the start of `.hg/dirstate` when the "v2" format is used.
/// This a redundant sanity check more than an actual "magic number" since
@@ -61,14 +63,14 @@
pub struct Docket<'on_disk> {
header: &'on_disk DocketHeader,
- uuid: &'on_disk [u8],
+ pub uuid: &'on_disk [u8],
}
/// Fields are documented in the *Tree metadata in the docket file*
/// section of `mercurial/helptext/internals/dirstate-v2.txt`
#[derive(BytesCast)]
#[repr(C)]
-struct TreeMetadata {
+pub struct TreeMetadata {
root_nodes: ChildNodes,
nodes_with_entry_count: Size,
nodes_with_copy_source_count: Size,
@@ -186,7 +188,51 @@
}
}
+impl TreeMetadata {
+ pub fn as_bytes(&self) -> &[u8] {
+ BytesCast::as_bytes(self)
+ }
+}
+
impl<'on_disk> Docket<'on_disk> {
+ /// Generate the identifier for a new data file
+ ///
+ /// TODO: support the `HGTEST_UUIDFILE` environment variable.
+ /// See `mercurial/revlogutils/docket.py`
+ pub fn new_uid() -> String {
+ const ID_LENGTH: usize = 8;
+ let mut id = String::with_capacity(ID_LENGTH);
+ let mut rng = rand::thread_rng();
+ for _ in 0..ID_LENGTH {
+ // One random hexadecimal digit.
+ // `unwrap` never panics because `impl Write for String`
+ // never returns an error.
+ write!(&mut id, "{:x}", rng.gen_range(0..16)).unwrap();
+ }
+ id
+ }
+
+ pub fn serialize(
+ parents: DirstateParents,
+ tree_metadata: TreeMetadata,
+ data_size: u64,
+ uuid: &[u8],
+ ) -> Result<Vec<u8>, std::num::TryFromIntError> {
+ let header = DocketHeader {
+ marker: *V2_FORMAT_MARKER,
+ parent_1: parents.p1.pad_to_256_bits(),
+ parent_2: parents.p2.pad_to_256_bits(),
+ metadata: tree_metadata,
+ data_size: u32::try_from(data_size)?.into(),
+ uuid_size: uuid.len().try_into()?,
+ };
+ let header = header.as_bytes();
+ let mut docket = Vec::with_capacity(header.len() + uuid.len());
+ docket.extend_from_slice(header);
+ docket.extend_from_slice(uuid);
+ Ok(docket)
+ }
+
pub fn parents(&self) -> DirstateParents {
use crate::Node;
let p1 = Node::try_from(&self.header.parent_1[..USED_NODE_ID_BYTES])
@@ -336,7 +382,7 @@
&& self.flags().contains(Flags::HAS_MTIME)
&& self.flags().contains(Flags::ALL_UNKNOWN_RECORDED)
{
- Ok(Some(self.mtime.try_into()?))
+ Ok(Some(self.mtime()?))
} else {
Ok(None)
}
@@ -356,6 +402,14 @@
(file_type | permisions).into()
}
+ fn mtime(&self) -> Result<TruncatedTimestamp, DirstateV2ParseError> {
+ let mut m: TruncatedTimestamp = self.mtime.try_into()?;
+ if self.flags().contains(Flags::MTIME_SECOND_AMBIGUOUS) {
+ m.second_ambiguous = true;
+ }
+ Ok(m)
+ }
+
fn assume_entry(&self) -> Result<DirstateEntry, DirstateV2ParseError> {
// TODO: convert through raw bits instead?
let wdir_tracked = self.flags().contains(Flags::WDIR_TRACKED);
@@ -371,11 +425,8 @@
let mtime = if self.flags().contains(Flags::HAS_MTIME)
&& !self.flags().contains(Flags::DIRECTORY)
&& !self.flags().contains(Flags::EXPECTED_STATE_IS_MODIFIED)
- // The current code is not able to do the more subtle comparison that the
- // MTIME_SECOND_AMBIGUOUS requires. So we ignore the mtime
- && !self.flags().contains(Flags::MTIME_SECOND_AMBIGUOUS)
{
- Some(self.mtime.try_into()?)
+ Some(self.mtime()?)
} else {
None
};
@@ -465,6 +516,9 @@
};
let mtime = if let Some(m) = mtime_opt {
flags.insert(Flags::HAS_MTIME);
+ if m.second_ambiguous {
+ flags.insert(Flags::MTIME_SECOND_AMBIGUOUS);
+ };
m.into()
} else {
PackedTruncatedTimestamp::null()
@@ -549,9 +603,9 @@
/// `dirstate_map.on_disk` (true), instead of written to a new data file
/// (false).
pub(super) fn write(
- dirstate_map: &mut DirstateMap,
+ dirstate_map: &DirstateMap,
can_append: bool,
-) -> Result<(Vec<u8>, Vec<u8>, bool), DirstateError> {
+) -> Result<(Vec<u8>, TreeMetadata, bool), DirstateError> {
let append = can_append && dirstate_map.write_should_append();
// This ignores the space for paths, and for nodes without an entry.
@@ -577,7 +631,7 @@
unused: [0; 4],
ignore_patterns_hash: dirstate_map.ignore_patterns_hash,
};
- Ok((writer.out, meta.as_bytes().to_vec(), append))
+ Ok((writer.out, meta, append))
}
struct Writer<'dmap, 'on_disk> {
@@ -631,7 +685,7 @@
dirstate_map::NodeData::Entry(entry) => {
Node::from_dirstate_entry(entry)
}
- dirstate_map::NodeData::CachedDirectory { mtime } => (
+ dirstate_map::NodeData::CachedDirectory { mtime } => {
// we currently never set a mtime if unknown file
// are present.
// So if we have a mtime for a directory, we know
@@ -642,12 +696,14 @@
// We never set ALL_IGNORED_RECORDED since we
// don't track that case
// currently.
- Flags::DIRECTORY
+ let mut flags = Flags::DIRECTORY
| Flags::HAS_MTIME
- | Flags::ALL_UNKNOWN_RECORDED,
- 0.into(),
- (*mtime).into(),
- ),
+ | Flags::ALL_UNKNOWN_RECORDED;
+ if mtime.second_ambiguous {
+ flags.insert(Flags::MTIME_SECOND_AMBIGUOUS)
+ }
+ (flags, 0.into(), (*mtime).into())
+ }
dirstate_map::NodeData::None => (
Flags::DIRECTORY,
0.into(),
@@ -773,6 +829,7 @@
Self::from_already_truncated(
timestamp.truncated_seconds.get(),
timestamp.nanoseconds.get(),
+ false,
)
}
}
--- a/rust/hg-core/src/dirstate_tree/owning.rs Fri Feb 18 12:55:39 2022 +0100
+++ b/rust/hg-core/src/dirstate_tree/owning.rs Fri Feb 18 14:27:43 2022 +0100
@@ -21,7 +21,7 @@
/// language cannot represent a lifetime referencing a sibling field.
/// This is not quite a self-referencial struct (moving this struct is not
/// a problem as it doesn’t change the address of the bytes buffer owned
- /// by `PyBytes`) but touches similar borrow-checker limitations.
+ /// by `on_disk`) but touches similar borrow-checker limitations.
ptr: *mut (),
}
@@ -50,13 +50,13 @@
// SAFETY: We cast the type-erased pointer back to the same type it had
// in `new`, except with a different lifetime parameter. This time we
// connect the lifetime to that of `self`. This cast is valid because
- // `self` owns the same `PyBytes` whose buffer `DirstateMap`
- // references. That buffer has a stable memory address because the byte
- // string value of a `PyBytes` is immutable.
+ // `self` owns the same `on_disk` whose buffer `DirstateMap`
+ // references. That buffer has a stable memory address because our
+        // `Self::new_empty` constructor requires `StableDeref`.
let ptr: *mut DirstateMap<'a> = self.ptr.cast();
// SAFETY: we dereference that pointer, connecting the lifetime of the
- // new `&mut` to that of `self`. This is valid because the
- // raw pointer is to a boxed value, and `self` owns that box.
+ // new `&mut` to that of `self`. This is valid because the
+ // raw pointer is to a boxed value, and `self` owns that box.
(&self.on_disk, unsafe { &mut *ptr })
}
@@ -65,7 +65,7 @@
}
pub fn get_map<'a>(&'a self) -> &'a DirstateMap<'a> {
- // SAFETY: same reasoning as in `get_mut` above.
+ // SAFETY: same reasoning as in `get_pair_mut` above.
let ptr: *mut DirstateMap<'a> = self.ptr.cast();
unsafe { &*ptr }
}
@@ -79,13 +79,13 @@
fn drop(&mut self) {
// Silence a "field is never read" warning, and demonstrate that this
// value is still alive.
- let _ = &self.on_disk;
+ let _: &Box<dyn Deref<Target = [u8]> + Send> = &self.on_disk;
// SAFETY: this cast is the same as in `get_mut`, and is valid for the
// same reason. `self.on_disk` still exists at this point, drop glue
// will drop it implicitly after this `drop` method returns.
let ptr: *mut DirstateMap<'_> = self.ptr.cast();
// SAFETY: `Box::from_raw` takes ownership of the box away from `self`.
- // This is fine because drop glue does nothig for `*mut ()` and we’re
+ // This is fine because drop glue does nothing for `*mut ()` and we’re
// in `drop`, so `get` and `get_mut` cannot be called again.
unsafe { drop(Box::from_raw(ptr)) }
}
--- a/rust/hg-core/src/dirstate_tree/status.rs Fri Feb 18 12:55:39 2022 +0100
+++ b/rust/hg-core/src/dirstate_tree/status.rs Fri Feb 18 14:27:43 2022 +0100
@@ -1,5 +1,6 @@
use crate::dirstate::entry::TruncatedTimestamp;
use crate::dirstate::status::IgnoreFnType;
+use crate::dirstate::status::StatusPath;
use crate::dirstate_tree::dirstate_map::BorrowedPath;
use crate::dirstate_tree::dirstate_map::ChildNodesRef;
use crate::dirstate_tree::dirstate_map::DirstateMap;
@@ -15,6 +16,7 @@
use crate::DirstateStatus;
use crate::EntryState;
use crate::HgPathBuf;
+use crate::HgPathCow;
use crate::PatternFileWarning;
use crate::StatusError;
use crate::StatusOptions;
@@ -61,16 +63,42 @@
(Box::new(|&_| true), vec![], None)
};
+ let filesystem_time_at_status_start =
+ filesystem_now(&root_dir).ok().map(TruncatedTimestamp::from);
+
+ // If the repository is under the current directory, prefer using a
+    // relative path, so the kernel needs to traverse fewer directories in every
+ // call to `read_dir` or `symlink_metadata`.
+ // This is effective in the common case where the current directory is the
+ // repository root.
+
+ // TODO: Better yet would be to use libc functions like `openat` and
+ // `fstatat` to remove such repeated traversals entirely, but the standard
+ // library does not provide APIs based on those.
+ // Maybe with a crate like https://crates.io/crates/openat instead?
+ let root_dir = if let Some(relative) = std::env::current_dir()
+ .ok()
+ .and_then(|cwd| root_dir.strip_prefix(cwd).ok())
+ {
+ relative
+ } else {
+ &root_dir
+ };
+
+ let outcome = DirstateStatus {
+ filesystem_time_at_status_start,
+ ..Default::default()
+ };
let common = StatusCommon {
dmap,
options,
matcher,
ignore_fn,
- outcome: Default::default(),
+ outcome: Mutex::new(outcome),
ignore_patterns_have_changed: patterns_changed,
new_cachable_directories: Default::default(),
outated_cached_directories: Default::default(),
- filesystem_time_at_status_start: filesystem_now(&root_dir).ok(),
+ filesystem_time_at_status_start,
};
let is_at_repo_root = true;
let hg_path = &BorrowedPath::OnDisk(HgPath::new(""));
@@ -138,10 +166,68 @@
/// The current time at the start of the `status()` algorithm, as measured
/// and possibly truncated by the filesystem.
- filesystem_time_at_status_start: Option<SystemTime>,
+ filesystem_time_at_status_start: Option<TruncatedTimestamp>,
+}
+
+enum Outcome {
+ Modified,
+ Added,
+ Removed,
+ Deleted,
+ Clean,
+ Ignored,
+ Unknown,
+ Unsure,
}
impl<'a, 'tree, 'on_disk> StatusCommon<'a, 'tree, 'on_disk> {
+ fn push_outcome(
+ &self,
+ which: Outcome,
+ dirstate_node: &NodeRef<'tree, 'on_disk>,
+ ) -> Result<(), DirstateV2ParseError> {
+ let path = dirstate_node
+ .full_path_borrowed(self.dmap.on_disk)?
+ .detach_from_tree();
+ let copy_source = if self.options.list_copies {
+ dirstate_node
+ .copy_source_borrowed(self.dmap.on_disk)?
+ .map(|source| source.detach_from_tree())
+ } else {
+ None
+ };
+ self.push_outcome_common(which, path, copy_source);
+ Ok(())
+ }
+
+ fn push_outcome_without_copy_source(
+ &self,
+ which: Outcome,
+ path: &BorrowedPath<'_, 'on_disk>,
+ ) {
+ self.push_outcome_common(which, path.detach_from_tree(), None)
+ }
+
+ fn push_outcome_common(
+ &self,
+ which: Outcome,
+ path: HgPathCow<'on_disk>,
+ copy_source: Option<HgPathCow<'on_disk>>,
+ ) {
+ let mut outcome = self.outcome.lock().unwrap();
+ let vec = match which {
+ Outcome::Modified => &mut outcome.modified,
+ Outcome::Added => &mut outcome.added,
+ Outcome::Removed => &mut outcome.removed,
+ Outcome::Deleted => &mut outcome.deleted,
+ Outcome::Clean => &mut outcome.clean,
+ Outcome::Ignored => &mut outcome.ignored,
+ Outcome::Unknown => &mut outcome.unknown,
+ Outcome::Unsure => &mut outcome.unsure,
+ };
+ vec.push(StatusPath { path, copy_source });
+ }
+
fn read_dir(
&self,
hg_path: &HgPath,
@@ -342,10 +428,7 @@
// If we previously had a file here, it was removed (with
// `hg rm` or similar) or deleted before it could be
// replaced by a directory or something else.
- self.mark_removed_or_deleted_if_file(
- &hg_path,
- dirstate_node.state()?,
- );
+ self.mark_removed_or_deleted_if_file(&dirstate_node)?;
}
if file_type.is_dir() {
if self.options.collect_traversed_dirs {
@@ -376,24 +459,13 @@
if file_or_symlink && self.matcher.matches(hg_path) {
if let Some(state) = dirstate_node.state()? {
match state {
- EntryState::Added => self
- .outcome
- .lock()
- .unwrap()
- .added
- .push(hg_path.detach_from_tree()),
+ EntryState::Added => {
+ self.push_outcome(Outcome::Added, &dirstate_node)?
+ }
EntryState::Removed => self
- .outcome
- .lock()
- .unwrap()
- .removed
- .push(hg_path.detach_from_tree()),
+ .push_outcome(Outcome::Removed, &dirstate_node)?,
EntryState::Merged => self
- .outcome
- .lock()
- .unwrap()
- .modified
- .push(hg_path.detach_from_tree()),
+ .push_outcome(Outcome::Modified, &dirstate_node)?,
EntryState::Normal => self
.handle_normal_file(&dirstate_node, fs_metadata)?,
}
@@ -421,71 +493,86 @@
directory_metadata: &std::fs::Metadata,
dirstate_node: NodeRef<'tree, 'on_disk>,
) -> Result<(), DirstateV2ParseError> {
- if children_all_have_dirstate_node_or_are_ignored {
- // All filesystem directory entries from `read_dir` have a
- // corresponding node in the dirstate, so we can reconstitute the
- // names of those entries without calling `read_dir` again.
- if let (Some(status_start), Ok(directory_mtime)) = (
- &self.filesystem_time_at_status_start,
- directory_metadata.modified(),
+ if !children_all_have_dirstate_node_or_are_ignored {
+ return Ok(());
+ }
+ // All filesystem directory entries from `read_dir` have a
+ // corresponding node in the dirstate, so we can reconstitute the
+ // names of those entries without calling `read_dir` again.
+
+ // TODO: use let-else here and below when available:
+ // https://github.com/rust-lang/rust/issues/87335
+ let status_start = if let Some(status_start) =
+ &self.filesystem_time_at_status_start
+ {
+ status_start
+ } else {
+ return Ok(());
+ };
+
+ // Although the Rust standard library’s `SystemTime` type
+ // has nanosecond precision, the times reported for a
+ // directory’s (or file’s) modified time may have lower
+ // resolution based on the filesystem (for example ext3
+ // only stores integer seconds), kernel (see
+ // https://stackoverflow.com/a/14393315/1162888), etc.
+ let directory_mtime = if let Ok(option) =
+ TruncatedTimestamp::for_reliable_mtime_of(
+ directory_metadata,
+ status_start,
) {
- // Although the Rust standard library’s `SystemTime` type
- // has nanosecond precision, the times reported for a
- // directory’s (or file’s) modified time may have lower
- // resolution based on the filesystem (for example ext3
- // only stores integer seconds), kernel (see
- // https://stackoverflow.com/a/14393315/1162888), etc.
- if &directory_mtime >= status_start {
- // The directory was modified too recently, don’t cache its
- // `read_dir` results.
- //
- // A timeline like this is possible:
- //
- // 1. A change to this directory (direct child was
- // added or removed) cause its mtime to be set
- // (possibly truncated) to `directory_mtime`
- // 2. This `status` algorithm calls `read_dir`
- // 3. An other change is made to the same directory is
- // made so that calling `read_dir` agin would give
- // different results, but soon enough after 1. that
- // the mtime stays the same
- //
- // On a system where the time resolution poor, this
- // scenario is not unlikely if all three steps are caused
- // by the same script.
- } else {
- // We’ve observed (through `status_start`) that time has
- // “progressed” since `directory_mtime`, so any further
- // change to this directory is extremely likely to cause a
- // different mtime.
- //
- // Having the same mtime again is not entirely impossible
- // since the system clock is not monotonous. It could jump
- // backward to some point before `directory_mtime`, then a
- // directory change could potentially happen during exactly
- // the wrong tick.
- //
- // We deem this scenario (unlike the previous one) to be
- // unlikely enough in practice.
- let truncated = TruncatedTimestamp::from(directory_mtime);
- let is_up_to_date = if let Some(cached) =
- dirstate_node.cached_directory_mtime()?
- {
- cached.likely_equal(truncated)
- } else {
- false
- };
- if !is_up_to_date {
- let hg_path = dirstate_node
- .full_path_borrowed(self.dmap.on_disk)?
- .detach_from_tree();
- self.new_cachable_directories
- .lock()
- .unwrap()
- .push((hg_path, truncated))
- }
- }
+ if let Some(directory_mtime) = option {
+ directory_mtime
+ } else {
+ // The directory was modified too recently,
+ // don’t cache its `read_dir` results.
+ //
+ // 1. A change to this directory (direct child was
+ // added or removed) cause its mtime to be set
+ // (possibly truncated) to `directory_mtime`
+ // 2. This `status` algorithm calls `read_dir`
+            // 3. Another change is made to the same directory so
+            //    that calling `read_dir` again would give
+ // different results, but soon enough after 1. that
+ // the mtime stays the same
+ //
+            // On a system where the time resolution is poor, this
+ // scenario is not unlikely if all three steps are caused
+ // by the same script.
+ return Ok(());
}
+ } else {
+ // OS/libc does not support mtime?
+ return Ok(());
+ };
+ // We’ve observed (through `status_start`) that time has
+ // “progressed” since `directory_mtime`, so any further
+ // change to this directory is extremely likely to cause a
+ // different mtime.
+ //
+ // Having the same mtime again is not entirely impossible
+ // since the system clock is not monotonous. It could jump
+ // backward to some point before `directory_mtime`, then a
+ // directory change could potentially happen during exactly
+ // the wrong tick.
+ //
+ // We deem this scenario (unlike the previous one) to be
+ // unlikely enough in practice.
+
+ let is_up_to_date =
+ if let Some(cached) = dirstate_node.cached_directory_mtime()? {
+ cached.likely_equal(directory_mtime)
+ } else {
+ false
+ };
+ if !is_up_to_date {
+ let hg_path = dirstate_node
+ .full_path_borrowed(self.dmap.on_disk)?
+ .detach_from_tree();
+ self.new_cachable_directories
+ .lock()
+ .unwrap()
+ .push((hg_path, directory_mtime))
}
Ok(())
}
@@ -505,7 +592,6 @@
let entry = dirstate_node
.entry()?
.expect("handle_normal_file called with entry-less node");
- let hg_path = &dirstate_node.full_path_borrowed(self.dmap.on_disk)?;
let mode_changed =
|| self.options.check_exec && entry.mode_changed(fs_metadata);
let size = entry.size();
@@ -513,43 +599,31 @@
if size >= 0 && size_changed && fs_metadata.file_type().is_symlink() {
// issue6456: Size returned may be longer due to encryption
// on EXT-4 fscrypt. TODO maybe only do it on EXT4?
- self.outcome
- .lock()
- .unwrap()
- .unsure
- .push(hg_path.detach_from_tree())
+ self.push_outcome(Outcome::Unsure, dirstate_node)?
} else if dirstate_node.has_copy_source()
|| entry.is_from_other_parent()
|| (size >= 0 && (size_changed || mode_changed()))
{
- self.outcome
- .lock()
- .unwrap()
- .modified
- .push(hg_path.detach_from_tree())
+ self.push_outcome(Outcome::Modified, dirstate_node)?
} else {
let mtime_looks_clean;
if let Some(dirstate_mtime) = entry.truncated_mtime() {
let fs_mtime = TruncatedTimestamp::for_mtime_of(fs_metadata)
.expect("OS/libc does not support mtime?");
+ // There might be a change in the future if for example the
+                // internal clock becomes off while the process runs, but this is a
+ // case where the issues the user would face
+ // would be a lot worse and there is nothing we
+ // can really do.
mtime_looks_clean = fs_mtime.likely_equal(dirstate_mtime)
- && !fs_mtime.likely_equal(self.options.last_normal_time)
} else {
// No mtime in the dirstate entry
mtime_looks_clean = false
};
if !mtime_looks_clean {
- self.outcome
- .lock()
- .unwrap()
- .unsure
- .push(hg_path.detach_from_tree())
+ self.push_outcome(Outcome::Unsure, dirstate_node)?
} else if self.options.list_clean {
- self.outcome
- .lock()
- .unwrap()
- .clean
- .push(hg_path.detach_from_tree())
+ self.push_outcome(Outcome::Clean, dirstate_node)?
}
}
Ok(())
@@ -561,10 +635,7 @@
dirstate_node: NodeRef<'tree, 'on_disk>,
) -> Result<(), DirstateV2ParseError> {
self.check_for_outdated_directory_cache(&dirstate_node)?;
- self.mark_removed_or_deleted_if_file(
- &dirstate_node.full_path_borrowed(self.dmap.on_disk)?,
- dirstate_node.state()?,
- );
+ self.mark_removed_or_deleted_if_file(&dirstate_node)?;
dirstate_node
.children(self.dmap.on_disk)?
.par_iter()
@@ -578,26 +649,19 @@
/// Does nothing on a "directory" node
fn mark_removed_or_deleted_if_file(
&self,
- hg_path: &BorrowedPath<'tree, 'on_disk>,
- dirstate_node_state: Option<EntryState>,
- ) {
- if let Some(state) = dirstate_node_state {
- if self.matcher.matches(hg_path) {
+ dirstate_node: &NodeRef<'tree, 'on_disk>,
+ ) -> Result<(), DirstateV2ParseError> {
+ if let Some(state) = dirstate_node.state()? {
+ let path = dirstate_node.full_path(self.dmap.on_disk)?;
+ if self.matcher.matches(path) {
if let EntryState::Removed = state {
- self.outcome
- .lock()
- .unwrap()
- .removed
- .push(hg_path.detach_from_tree())
+ self.push_outcome(Outcome::Removed, dirstate_node)?
} else {
- self.outcome
- .lock()
- .unwrap()
- .deleted
- .push(hg_path.detach_from_tree())
+ self.push_outcome(Outcome::Deleted, &dirstate_node)?
}
}
}
+ Ok(())
}
/// Something in the filesystem has no corresponding dirstate node
@@ -675,19 +739,17 @@
let is_ignored = has_ignored_ancestor || (self.ignore_fn)(&hg_path);
if is_ignored {
if self.options.list_ignored {
- self.outcome
- .lock()
- .unwrap()
- .ignored
- .push(hg_path.detach_from_tree())
+ self.push_outcome_without_copy_source(
+ Outcome::Ignored,
+ hg_path,
+ )
}
} else {
if self.options.list_unknown {
- self.outcome
- .lock()
- .unwrap()
- .unknown
- .push(hg_path.detach_from_tree())
+ self.push_outcome_without_copy_source(
+ Outcome::Unknown,
+ hg_path,
+ )
}
}
is_ignored
@@ -710,8 +772,11 @@
/// * Elsewhere, we’re listing the content of a sub-repo. Return an empty
/// list instead.
fn read_dir(path: &Path, is_at_repo_root: bool) -> io::Result<Vec<Self>> {
+ // `read_dir` returns a "not found" error for the empty path
+ let at_cwd = path == Path::new("");
+ let read_dir_path = if at_cwd { Path::new(".") } else { path };
let mut results = Vec::new();
- for entry in path.read_dir()? {
+ for entry in read_dir_path.read_dir()? {
let entry = entry?;
let metadata = match entry.metadata() {
Ok(v) => v,
@@ -724,9 +789,9 @@
}
}
};
- let name = get_bytes_from_os_string(entry.file_name());
+ let file_name = entry.file_name();
// FIXME don't do this when cached
- if name == b".hg" {
+ if file_name == ".hg" {
if is_at_repo_root {
// Skip the repo’s own .hg (might be a symlink)
continue;
@@ -736,9 +801,15 @@
return Ok(Vec::new());
}
}
+ let full_path = if at_cwd {
+ file_name.clone().into()
+ } else {
+ entry.path()
+ };
+ let base_name = get_bytes_from_os_string(file_name).into();
results.push(DirEntry {
- base_name: name.into(),
- full_path: entry.path(),
+ base_name,
+ full_path,
metadata,
})
}
--- a/rust/hg-core/src/errors.rs Fri Feb 18 12:55:39 2022 +0100
+++ b/rust/hg-core/src/errors.rs Fri Feb 18 14:27:43 2022 +0100
@@ -151,6 +151,8 @@
/// Converts a `Result` with `std::io::Error` into one with `HgError`.
fn when_reading_file(self, path: &std::path::Path) -> Result<T, HgError>;
+ fn when_writing_file(self, path: &std::path::Path) -> Result<T, HgError>;
+
fn with_context(
self,
context: impl FnOnce() -> IoErrorContext,
@@ -162,6 +164,10 @@
self.with_context(|| IoErrorContext::ReadingFile(path.to_owned()))
}
+ fn when_writing_file(self, path: &std::path::Path) -> Result<T, HgError> {
+ self.with_context(|| IoErrorContext::WritingFile(path.to_owned()))
+ }
+
fn with_context(
self,
context: impl FnOnce() -> IoErrorContext,
--- a/rust/hg-core/src/lib.rs Fri Feb 18 12:55:39 2022 +0100
+++ b/rust/hg-core/src/lib.rs Fri Feb 18 14:27:43 2022 +0100
@@ -7,7 +7,7 @@
mod ancestors;
pub mod dagops;
pub mod errors;
-pub use ancestors::{AncestorsIterator, LazyAncestors, MissingAncestors};
+pub use ancestors::{AncestorsIterator, MissingAncestors};
pub mod dirstate;
pub mod dirstate_tree;
pub mod discovery;
@@ -29,6 +29,7 @@
pub mod revlog;
pub use revlog::*;
pub mod config;
+pub mod lock;
pub mod logging;
pub mod operations;
pub mod revset;
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/lock.rs Fri Feb 18 14:27:43 2022 +0100
@@ -0,0 +1,187 @@
+//! Filesystem-based locks for local repositories
+
+use crate::errors::HgError;
+use crate::errors::HgResultExt;
+use crate::utils::StrExt;
+use crate::vfs::Vfs;
+use std::io;
+use std::io::ErrorKind;
+
+#[derive(derive_more::From)]
+pub enum LockError {
+ AlreadyHeld,
+ #[from]
+ Other(HgError),
+}
+
+/// Try to call `f` with the lock acquired, without waiting.
+///
+/// If the lock is already held, `f` is not called and `LockError::AlreadyHeld`
+/// is returned. `LockError::Other` is returned for any unexpected I/O error
+/// accessing the lock file, including for removing it after `f` was called.
+/// The return value of `f` is dropped in that case. If all is successful, the
+/// return value of `f` is forwarded.
+pub fn try_with_lock_no_wait<R>(
+ hg_vfs: Vfs,
+ lock_filename: &str,
+ f: impl FnOnce() -> R,
+) -> Result<R, LockError> {
+ let our_lock_data = &*OUR_LOCK_DATA;
+ for _retry in 0..5 {
+ match make_lock(hg_vfs, lock_filename, our_lock_data) {
+ Ok(()) => {
+ let result = f();
+ unlock(hg_vfs, lock_filename)?;
+ return Ok(result);
+ }
+ Err(HgError::IoError { error, .. })
+ if error.kind() == ErrorKind::AlreadyExists =>
+ {
+ let lock_data = read_lock(hg_vfs, lock_filename)?;
+ if lock_data.is_none() {
+ // Lock was apparently just released, retry acquiring it
+ continue;
+ }
+ if !lock_should_be_broken(&lock_data) {
+ return Err(LockError::AlreadyHeld);
+ }
+ // The lock file is left over from a process not running
+ // anymore. Break it, but with another lock to
+ // avoid a race.
+ break_lock(hg_vfs, lock_filename)?;
+
+ // Retry acquiring
+ }
+ Err(error) => Err(error)?,
+ }
+ }
+ Err(LockError::AlreadyHeld)
+}
+
+fn break_lock(hg_vfs: Vfs, lock_filename: &str) -> Result<(), LockError> {
+ try_with_lock_no_wait(hg_vfs, &format!("{}.break", lock_filename), || {
+ // Check again in case some other process broke and
+ // acquired the lock in the meantime
+ let lock_data = read_lock(hg_vfs, lock_filename)?;
+ if !lock_should_be_broken(&lock_data) {
+ return Err(LockError::AlreadyHeld);
+ }
+ Ok(hg_vfs.remove_file(lock_filename)?)
+ })?
+}
+
+#[cfg(unix)]
+fn make_lock(
+ hg_vfs: Vfs,
+ lock_filename: &str,
+ data: &str,
+) -> Result<(), HgError> {
+ // Use a symbolic link because creating it is atomic.
+ // The link’s "target" contains data not representing any path.
+ let fake_symlink_target = data;
+ hg_vfs.create_symlink(lock_filename, fake_symlink_target)
+}
+
+fn read_lock(
+ hg_vfs: Vfs,
+ lock_filename: &str,
+) -> Result<Option<String>, HgError> {
+ let link_target =
+ hg_vfs.read_link(lock_filename).io_not_found_as_none()?;
+ if let Some(target) = link_target {
+ let data = target
+ .into_os_string()
+ .into_string()
+ .map_err(|_| HgError::corrupted("non-UTF-8 lock data"))?;
+ Ok(Some(data))
+ } else {
+ Ok(None)
+ }
+}
+
+fn unlock(hg_vfs: Vfs, lock_filename: &str) -> Result<(), HgError> {
+ hg_vfs.remove_file(lock_filename)
+}
+
+/// Return whether the process that is/was holding the lock is known not to be
+/// running anymore.
+fn lock_should_be_broken(data: &Option<String>) -> bool {
+ (|| -> Option<bool> {
+ let (prefix, pid) = data.as_ref()?.split_2(':')?;
+ if prefix != &*LOCK_PREFIX {
+ return Some(false);
+ }
+ let process_is_running;
+
+ #[cfg(unix)]
+ {
+ let pid: libc::pid_t = pid.parse().ok()?;
+ unsafe {
+ let signal = 0; // Test if we could send a signal, without sending
+ let result = libc::kill(pid, signal);
+ if result == 0 {
+ process_is_running = true
+ } else {
+ let errno =
+ io::Error::last_os_error().raw_os_error().unwrap();
+ process_is_running = errno != libc::ESRCH
+ }
+ }
+ }
+
+ Some(!process_is_running)
+ })()
+ .unwrap_or(false)
+}
+
+lazy_static::lazy_static! {
+ /// A string which is used to differentiate pid namespaces
+ ///
+ /// It's useful to detect "dead" processes and remove stale locks with
+ /// confidence. Typically it's just hostname. On modern linux, we include an
+ /// extra Linux-specific pid namespace identifier.
+ static ref LOCK_PREFIX: String = {
+ // Note: this must match the behavior of `_getlockprefix` in `mercurial/lock.py`
+
+ /// Same as https://github.com/python/cpython/blob/v3.10.0/Modules/socketmodule.c#L5414
+ const BUFFER_SIZE: usize = 1024;
+ let mut buffer = [0_i8; BUFFER_SIZE];
+ let hostname_bytes = unsafe {
+ let result = libc::gethostname(buffer.as_mut_ptr(), BUFFER_SIZE);
+ if result != 0 {
+ panic!("gethostname: {}", io::Error::last_os_error())
+ }
+ std::ffi::CStr::from_ptr(buffer.as_mut_ptr()).to_bytes()
+ };
+ let hostname =
+ std::str::from_utf8(hostname_bytes).expect("non-UTF-8 hostname");
+
+ #[cfg(target_os = "linux")]
+ {
+ use std::os::linux::fs::MetadataExt;
+ match std::fs::metadata("/proc/self/ns/pid") {
+ Ok(meta) => {
+ return format!("{}/{:x}", hostname, meta.st_ino())
+ }
+ Err(error) => {
+ // TODO: match on `error.kind()` when `NotADirectory`
+ // is available on all supported Rust versions:
+ // https://github.com/rust-lang/rust/issues/86442
+ use libc::{
+ ENOENT, // ErrorKind::NotFound
+ ENOTDIR, // ErrorKind::NotADirectory
+ EACCES, // ErrorKind::PermissionDenied
+ };
+ match error.raw_os_error() {
+ Some(ENOENT) | Some(ENOTDIR) | Some(EACCES) => {}
+ _ => panic!("stat /proc/self/ns/pid: {}", error),
+ }
+ }
+ }
+ }
+
+ hostname.to_owned()
+ };
+
+ static ref OUR_LOCK_DATA: String = format!("{}:{}", &*LOCK_PREFIX, std::process::id());
+}
--- a/rust/hg-core/src/matchers.rs Fri Feb 18 12:55:39 2022 +0100
+++ b/rust/hg-core/src/matchers.rs Fri Feb 18 14:27:43 2022 +0100
@@ -22,6 +22,7 @@
PatternSyntax,
};
+use crate::dirstate::status::IgnoreFnType;
use crate::filepatterns::normalize_path_bytes;
use std::borrow::ToOwned;
use std::collections::HashSet;
@@ -246,7 +247,7 @@
/// ```
pub struct IncludeMatcher<'a> {
patterns: Vec<u8>,
- match_fn: Box<dyn for<'r> Fn(&'r HgPath) -> bool + 'a + Sync>,
+ match_fn: IgnoreFnType<'a>,
/// Whether all the patterns match a prefix (i.e. recursively)
prefix: bool,
roots: HashSet<HgPathBuf>,
@@ -341,9 +342,9 @@
/// Returns the regex pattern and a function that matches an `HgPath` against
/// said regex formed by the given ignore patterns.
-fn build_regex_match(
- ignore_patterns: &[IgnorePattern],
-) -> PatternResult<(Vec<u8>, Box<dyn Fn(&HgPath) -> bool + Sync>)> {
+fn build_regex_match<'a, 'b>(
+ ignore_patterns: &'a [IgnorePattern],
+) -> PatternResult<(Vec<u8>, IgnoreFnType<'b>)> {
let mut regexps = vec![];
let mut exact_set = HashSet::new();
@@ -365,10 +366,10 @@
let func = move |filename: &HgPath| {
exact_set.contains(filename) || matcher(filename)
};
- Box::new(func) as Box<dyn Fn(&HgPath) -> bool + Sync>
+ Box::new(func) as IgnoreFnType
} else {
let func = move |filename: &HgPath| exact_set.contains(filename);
- Box::new(func) as Box<dyn Fn(&HgPath) -> bool + Sync>
+ Box::new(func) as IgnoreFnType
};
Ok((full_regex, func))
@@ -476,8 +477,8 @@
/// should be matched.
fn build_match<'a, 'b>(
ignore_patterns: Vec<IgnorePattern>,
-) -> PatternResult<(Vec<u8>, Box<dyn Fn(&HgPath) -> bool + 'b + Sync>)> {
- let mut match_funcs: Vec<Box<dyn Fn(&HgPath) -> bool + Sync>> = vec![];
+) -> PatternResult<(Vec<u8>, IgnoreFnType<'b>)> {
+ let mut match_funcs: Vec<IgnoreFnType<'b>> = vec![];
// For debugging and printing
let mut patterns = vec![];
@@ -560,14 +561,11 @@
/// Parses all "ignore" files with their recursive includes and returns a
/// function that checks whether a given file (in the general sense) should be
/// ignored.
-pub fn get_ignore_function<'a>(
+pub fn get_ignore_matcher<'a>(
mut all_pattern_files: Vec<PathBuf>,
root_dir: &Path,
inspect_pattern_bytes: &mut impl FnMut(&[u8]),
-) -> PatternResult<(
- Box<dyn for<'r> Fn(&'r HgPath) -> bool + Sync + 'a>,
- Vec<PatternFileWarning>,
-)> {
+) -> PatternResult<(IncludeMatcher<'a>, Vec<PatternFileWarning>)> {
let mut all_patterns = vec![];
let mut all_warnings = vec![];
@@ -590,10 +588,25 @@
all_warnings.extend(warnings);
}
let matcher = IncludeMatcher::new(all_patterns)?;
- Ok((
- Box::new(move |path: &HgPath| matcher.matches(path)),
- all_warnings,
- ))
+ Ok((matcher, all_warnings))
+}
+
+/// Parses all "ignore" files with their recursive includes and returns a
+/// function that checks whether a given file (in the general sense) should be
+/// ignored.
+pub fn get_ignore_function<'a>(
+ all_pattern_files: Vec<PathBuf>,
+ root_dir: &Path,
+ inspect_pattern_bytes: &mut impl FnMut(&[u8]),
+) -> PatternResult<(IgnoreFnType<'a>, Vec<PatternFileWarning>)> {
+ let res =
+ get_ignore_matcher(all_pattern_files, root_dir, inspect_pattern_bytes);
+ res.map(|(matcher, all_warnings)| {
+ let res: IgnoreFnType<'a> =
+ Box::new(move |path: &HgPath| matcher.matches(path));
+
+ (res, all_warnings)
+ })
}
impl<'a> IncludeMatcher<'a> {
@@ -628,6 +641,10 @@
.chain(self.parents.iter());
DirsChildrenMultiset::new(thing, Some(&self.parents))
}
+
+ pub fn debug_get_patterns(&self) -> &[u8] {
+ self.patterns.as_ref()
+ }
}
impl<'a> Display for IncludeMatcher<'a> {
--- a/rust/hg-core/src/operations/cat.rs Fri Feb 18 12:55:39 2022 +0100
+++ b/rust/hg-core/src/operations/cat.rs Fri Feb 18 14:27:43 2022 +0100
@@ -11,6 +11,9 @@
use crate::utils::hg_path::HgPath;
+use crate::errors::HgError;
+use crate::manifest::Manifest;
+use crate::manifest::ManifestEntry;
use itertools::put_back;
use itertools::PutBack;
use std::cmp::Ordering;
@@ -28,46 +31,43 @@
}
// Find an item in an iterator over a sorted collection.
-fn find_item<'a, 'b, 'c, D, I: Iterator<Item = (&'a HgPath, D)>>(
- i: &mut PutBack<I>,
- needle: &'b HgPath,
-) -> Option<D> {
+fn find_item<'a>(
+ i: &mut PutBack<impl Iterator<Item = Result<ManifestEntry<'a>, HgError>>>,
+ needle: &HgPath,
+) -> Result<Option<Node>, HgError> {
loop {
match i.next() {
- None => return None,
- Some(val) => match needle.as_bytes().cmp(val.0.as_bytes()) {
- Ordering::Less => {
- i.put_back(val);
- return None;
+ None => return Ok(None),
+ Some(result) => {
+ let entry = result?;
+ match needle.as_bytes().cmp(entry.path.as_bytes()) {
+ Ordering::Less => {
+ i.put_back(Ok(entry));
+ return Ok(None);
+ }
+ Ordering::Greater => continue,
+ Ordering::Equal => return Ok(Some(entry.node_id()?)),
}
- Ordering::Greater => continue,
- Ordering::Equal => return Some(val.1),
- },
+ }
}
}
}
-fn find_files_in_manifest<
- 'manifest,
- 'query,
- Data,
- Manifest: Iterator<Item = (&'manifest HgPath, Data)>,
- Query: Iterator<Item = &'query HgPath>,
->(
- manifest: Manifest,
- query: Query,
-) -> (Vec<(&'query HgPath, Data)>, Vec<&'query HgPath>) {
- let mut manifest = put_back(manifest);
+fn find_files_in_manifest<'query>(
+ manifest: &Manifest,
+ query: impl Iterator<Item = &'query HgPath>,
+) -> Result<(Vec<(&'query HgPath, Node)>, Vec<&'query HgPath>), HgError> {
+ let mut manifest = put_back(manifest.iter());
let mut res = vec![];
let mut missing = vec![];
for file in query {
- match find_item(&mut manifest, file) {
+ match find_item(&mut manifest, file)? {
None => missing.push(file),
Some(item) => res.push((file, item)),
}
}
- return (res, missing);
+ return Ok((res, missing));
}
/// Output the given revision of files
@@ -92,17 +92,16 @@
files.sort_unstable();
let (found, missing) = find_files_in_manifest(
- manifest.files_with_nodes(),
+ &manifest,
files.into_iter().map(|f| f.as_ref()),
- );
+ )?;
- for (file_path, node_bytes) in found {
+ for (file_path, file_node) in found {
found_any = true;
let file_log = repo.filelog(file_path)?;
- let file_node = Node::from_hex_for_repo(node_bytes)?;
results.push((
file_path,
- file_log.data_for_node(file_node)?.into_data()?,
+ file_log.data_for_node(file_node)?.into_file_data()?,
));
}
--- a/rust/hg-core/src/operations/debugdata.rs Fri Feb 18 12:55:39 2022 +0100
+++ b/rust/hg-core/src/operations/debugdata.rs Fri Feb 18 14:27:43 2022 +0100
@@ -29,5 +29,5 @@
let rev =
crate::revset::resolve_rev_number_or_hex_prefix(revset, &revlog)?;
let data = revlog.get_rev_data(rev)?;
- Ok(data)
+ Ok(data.into_owned())
}
--- a/rust/hg-core/src/operations/list_tracked_files.rs Fri Feb 18 12:55:39 2022 +0100
+++ b/rust/hg-core/src/operations/list_tracked_files.rs Fri Feb 18 14:27:43 2022 +0100
@@ -76,7 +76,7 @@
pub struct FilesForRev(Manifest);
impl FilesForRev {
- pub fn iter(&self) -> impl Iterator<Item = &HgPath> {
- self.0.files()
+ pub fn iter(&self) -> impl Iterator<Item = Result<&HgPath, HgError>> {
+ self.0.iter().map(|entry| Ok(entry?.path))
}
}
--- a/rust/hg-core/src/repo.rs Fri Feb 18 12:55:39 2022 +0100
+++ b/rust/hg-core/src/repo.rs Fri Feb 18 14:27:43 2022 +0100
@@ -2,10 +2,12 @@
use crate::config::{Config, ConfigError, ConfigParseError};
use crate::dirstate::DirstateParents;
use crate::dirstate_tree::dirstate_map::DirstateMap;
+use crate::dirstate_tree::on_disk::Docket as DirstateDocket;
use crate::dirstate_tree::owning::OwningDirstateMap;
-use crate::errors::HgError;
use crate::errors::HgResultExt;
+use crate::errors::{HgError, IoResultExt};
use crate::exit_codes;
+use crate::lock::{try_with_lock_no_wait, LockError};
use crate::manifest::{Manifest, Manifestlog};
use crate::revlog::filelog::Filelog;
use crate::revlog::revlog::RevlogError;
@@ -15,8 +17,11 @@
use crate::vfs::{is_dir, is_file, Vfs};
use crate::{requirements, NodePrefix};
use crate::{DirstateError, Revision};
-use std::cell::{Cell, Ref, RefCell, RefMut};
+use std::cell::{Ref, RefCell, RefMut};
use std::collections::HashSet;
+use std::io::Seek;
+use std::io::SeekFrom;
+use std::io::Write as IoWrite;
use std::path::{Path, PathBuf};
/// A repository on disk
@@ -26,8 +31,8 @@
store: PathBuf,
requirements: HashSet<String>,
config: Config,
- // None means not known/initialized yet
- dirstate_parents: Cell<Option<DirstateParents>>,
+ dirstate_parents: LazyCell<DirstateParents, HgError>,
+ dirstate_data_file_uuid: LazyCell<Option<Vec<u8>>, HgError>,
dirstate_map: LazyCell<OwningDirstateMap, DirstateError>,
changelog: LazyCell<Changelog, HgError>,
manifestlog: LazyCell<Manifestlog, HgError>,
@@ -202,7 +207,10 @@
store: store_path,
dot_hg,
config: repo_config,
- dirstate_parents: Cell::new(None),
+ dirstate_parents: LazyCell::new(Self::read_dirstate_parents),
+ dirstate_data_file_uuid: LazyCell::new(
+ Self::read_dirstate_data_file_uuid,
+ ),
dirstate_map: LazyCell::new(Self::new_dirstate_map),
changelog: LazyCell::new(Changelog::open),
manifestlog: LazyCell::new(Manifestlog::open),
@@ -243,11 +251,26 @@
}
}
+ pub fn try_with_wlock_no_wait<R>(
+ &self,
+ f: impl FnOnce() -> R,
+ ) -> Result<R, LockError> {
+ try_with_lock_no_wait(self.hg_vfs(), "wlock", f)
+ }
+
pub fn has_dirstate_v2(&self) -> bool {
self.requirements
.contains(requirements::DIRSTATE_V2_REQUIREMENT)
}
+ pub fn has_sparse(&self) -> bool {
+ self.requirements.contains(requirements::SPARSE_REQUIREMENT)
+ }
+
+ pub fn has_narrow(&self) -> bool {
+ self.requirements.contains(requirements::NARROW_REQUIREMENT)
+ }
+
fn dirstate_file_contents(&self) -> Result<Vec<u8>, HgError> {
Ok(self
.hg_vfs()
@@ -257,32 +280,64 @@
}
pub fn dirstate_parents(&self) -> Result<DirstateParents, HgError> {
- if let Some(parents) = self.dirstate_parents.get() {
- return Ok(parents);
- }
+ Ok(*self.dirstate_parents.get_or_init(self)?)
+ }
+
+ fn read_dirstate_parents(&self) -> Result<DirstateParents, HgError> {
let dirstate = self.dirstate_file_contents()?;
let parents = if dirstate.is_empty() {
+ if self.has_dirstate_v2() {
+ self.dirstate_data_file_uuid.set(None);
+ }
DirstateParents::NULL
} else if self.has_dirstate_v2() {
- crate::dirstate_tree::on_disk::read_docket(&dirstate)?.parents()
+ let docket =
+ crate::dirstate_tree::on_disk::read_docket(&dirstate)?;
+ self.dirstate_data_file_uuid
+ .set(Some(docket.uuid.to_owned()));
+ docket.parents()
} else {
crate::dirstate::parsers::parse_dirstate_parents(&dirstate)?
.clone()
};
- self.dirstate_parents.set(Some(parents));
+ self.dirstate_parents.set(parents);
Ok(parents)
}
+ fn read_dirstate_data_file_uuid(
+ &self,
+ ) -> Result<Option<Vec<u8>>, HgError> {
+ assert!(
+ self.has_dirstate_v2(),
+ "accessing dirstate data file ID without dirstate-v2"
+ );
+ let dirstate = self.dirstate_file_contents()?;
+ if dirstate.is_empty() {
+ self.dirstate_parents.set(DirstateParents::NULL);
+ Ok(None)
+ } else {
+ let docket =
+ crate::dirstate_tree::on_disk::read_docket(&dirstate)?;
+ self.dirstate_parents.set(docket.parents());
+ Ok(Some(docket.uuid.to_owned()))
+ }
+ }
+
fn new_dirstate_map(&self) -> Result<OwningDirstateMap, DirstateError> {
let dirstate_file_contents = self.dirstate_file_contents()?;
if dirstate_file_contents.is_empty() {
- self.dirstate_parents.set(Some(DirstateParents::NULL));
+ self.dirstate_parents.set(DirstateParents::NULL);
+ if self.has_dirstate_v2() {
+ self.dirstate_data_file_uuid.set(None);
+ }
Ok(OwningDirstateMap::new_empty(Vec::new()))
} else if self.has_dirstate_v2() {
let docket = crate::dirstate_tree::on_disk::read_docket(
&dirstate_file_contents,
)?;
- self.dirstate_parents.set(Some(docket.parents()));
+ self.dirstate_parents.set(docket.parents());
+ self.dirstate_data_file_uuid
+ .set(Some(docket.uuid.to_owned()));
let data_size = docket.data_size();
let metadata = docket.tree_metadata();
let mut map = if let Some(data_mmap) = self
@@ -302,7 +357,7 @@
let (on_disk, placeholder) = map.get_pair_mut();
let (inner, parents) = DirstateMap::new_v1(on_disk)?;
self.dirstate_parents
- .set(Some(parents.unwrap_or(DirstateParents::NULL)));
+ .set(parents.unwrap_or(DirstateParents::NULL));
*placeholder = inner;
Ok(map)
}
@@ -362,9 +417,81 @@
)
}
+ pub fn has_subrepos(&self) -> Result<bool, DirstateError> {
+ if let Some(entry) = self.dirstate_map()?.get(HgPath::new(".hgsub"))? {
+ Ok(entry.state().is_tracked())
+ } else {
+ Ok(false)
+ }
+ }
+
pub fn filelog(&self, path: &HgPath) -> Result<Filelog, HgError> {
Filelog::open(self, path)
}
+
+ /// Write to disk any updates that were made through `dirstate_map_mut`.
+ ///
+ /// The "wlock" must be held while calling this.
+ /// See for example `try_with_wlock_no_wait`.
+ ///
+ /// TODO: have a `WritableRepo` type only accessible while holding the
+ /// lock?
+ pub fn write_dirstate(&self) -> Result<(), DirstateError> {
+ let map = self.dirstate_map()?;
+ // TODO: Maintain a `DirstateMap::dirty` flag, and return early here if
+ // it’s unset
+ let parents = self.dirstate_parents()?;
+ let packed_dirstate = if self.has_dirstate_v2() {
+ let uuid = self.dirstate_data_file_uuid.get_or_init(self)?;
+ let mut uuid = uuid.as_ref();
+ let can_append = uuid.is_some();
+ let (data, tree_metadata, append) = map.pack_v2(can_append)?;
+ if !append {
+ uuid = None
+ }
+ let uuid = if let Some(uuid) = uuid {
+ std::str::from_utf8(uuid)
+ .map_err(|_| {
+ HgError::corrupted("non-UTF-8 dirstate data file ID")
+ })?
+ .to_owned()
+ } else {
+ DirstateDocket::new_uid()
+ };
+ let data_filename = format!("dirstate.{}", uuid);
+ let data_filename = self.hg_vfs().join(data_filename);
+ let mut options = std::fs::OpenOptions::new();
+ if append {
+ options.append(true);
+ } else {
+ options.write(true).create_new(true);
+ }
+ let data_size = (|| {
+ // TODO: loop and try another random ID if !append and this
+ // returns `ErrorKind::AlreadyExists`? Collision chance of two
+ // random IDs is one in 2**32
+ let mut file = options.open(&data_filename)?;
+ file.write_all(&data)?;
+ file.flush()?;
+ // TODO: use https://doc.rust-lang.org/std/io/trait.Seek.html#method.stream_position when we require Rust 1.51+
+ file.seek(SeekFrom::Current(0))
+ })()
+ .when_writing_file(&data_filename)?;
+ DirstateDocket::serialize(
+ parents,
+ tree_metadata,
+ data_size,
+ uuid.as_bytes(),
+ )
+ .map_err(|_: std::num::TryFromIntError| {
+ HgError::corrupted("overflow in dirstate docket serialization")
+ })?
+ } else {
+ map.pack_v1(parents)?
+ };
+ self.hg_vfs().atomic_write("dirstate", &packed_dirstate)?;
+ Ok(())
+ }
}
/// Lazily-initialized component of `Repo` with interior mutability
@@ -386,6 +513,10 @@
}
}
+ fn set(&self, value: T) {
+ *self.value.borrow_mut() = Some(value)
+ }
+
fn get_or_init(&self, repo: &Repo) -> Result<Ref<T>, E> {
let mut borrowed = self.value.borrow();
if borrowed.is_none() {
@@ -399,7 +530,7 @@
Ok(Ref::map(borrowed, |option| option.as_ref().unwrap()))
}
- pub fn get_mut_or_init(&self, repo: &Repo) -> Result<RefMut<T>, E> {
+ fn get_mut_or_init(&self, repo: &Repo) -> Result<RefMut<T>, E> {
let mut borrowed = self.value.borrow_mut();
if borrowed.is_none() {
*borrowed = Some((self.init)(repo)?);
--- a/rust/hg-core/src/requirements.rs Fri Feb 18 12:55:39 2022 +0100
+++ b/rust/hg-core/src/requirements.rs Fri Feb 18 14:27:43 2022 +0100
@@ -88,6 +88,10 @@
// When it starts writing to the repository, it’ll need to either keep the
// persistent nodemap up to date or remove this entry:
NODEMAP_REQUIREMENT,
+ // Not all commands support `sparse` and `narrow`. The commands that do
+ // not should opt out by checking `has_sparse` and `has_narrow`.
+ SPARSE_REQUIREMENT,
+ NARROW_REQUIREMENT,
];
// Copied from mercurial/requirements.py:
--- a/rust/hg-core/src/revlog/changelog.rs Fri Feb 18 12:55:39 2022 +0100
+++ b/rust/hg-core/src/revlog/changelog.rs Fri Feb 18 14:27:43 2022 +0100
@@ -22,7 +22,7 @@
pub fn data_for_node(
&self,
node: NodePrefix,
- ) -> Result<ChangelogEntry, RevlogError> {
+ ) -> Result<ChangelogRevisionData, RevlogError> {
let rev = self.revlog.rev_from_node(node)?;
self.data_for_rev(rev)
}
@@ -31,9 +31,9 @@
pub fn data_for_rev(
&self,
rev: Revision,
- ) -> Result<ChangelogEntry, RevlogError> {
- let bytes = self.revlog.get_rev_data(rev)?;
- Ok(ChangelogEntry { bytes })
+ ) -> Result<ChangelogRevisionData, RevlogError> {
+ let bytes = self.revlog.get_rev_data(rev)?.into_owned();
+ Ok(ChangelogRevisionData { bytes })
}
pub fn node_from_rev(&self, rev: Revision) -> Option<&Node> {
@@ -43,12 +43,12 @@
/// `Changelog` entry which knows how to interpret the `changelog` data bytes.
#[derive(Debug)]
-pub struct ChangelogEntry {
+pub struct ChangelogRevisionData {
/// The data bytes of the `changelog` entry.
bytes: Vec<u8>,
}
-impl ChangelogEntry {
+impl ChangelogRevisionData {
/// Return an iterator over the lines of the entry.
pub fn lines(&self) -> impl Iterator<Item = &[u8]> {
self.bytes
--- a/rust/hg-core/src/revlog/filelog.rs Fri Feb 18 12:55:39 2022 +0100
+++ b/rust/hg-core/src/revlog/filelog.rs Fri Feb 18 14:27:43 2022 +0100
@@ -1,6 +1,7 @@
use crate::errors::HgError;
use crate::repo::Repo;
use crate::revlog::path_encode::path_encode;
+use crate::revlog::revlog::RevlogEntry;
use crate::revlog::revlog::{Revlog, RevlogError};
use crate::revlog::NodePrefix;
use crate::revlog::Revision;
@@ -23,24 +24,43 @@
Ok(Self { revlog })
}
- /// The given node ID is that of the file as found in a manifest, not of a
+ /// The given node ID is that of the file as found in a filelog, not of a
/// changeset.
pub fn data_for_node(
&self,
file_node: impl Into<NodePrefix>,
+ ) -> Result<FilelogRevisionData, RevlogError> {
+ let file_rev = self.revlog.rev_from_node(file_node.into())?;
+ self.data_for_rev(file_rev)
+ }
+
+ /// The given revision is that of the file as found in a filelog, not of a
+ /// changeset.
+ pub fn data_for_rev(
+ &self,
+ file_rev: Revision,
+ ) -> Result<FilelogRevisionData, RevlogError> {
+ let data: Vec<u8> = self.revlog.get_rev_data(file_rev)?.into_owned();
+ Ok(FilelogRevisionData(data.into()))
+ }
+
+ /// The given node ID is that of the file as found in a filelog, not of a
+ /// changeset.
+ pub fn entry_for_node(
+ &self,
+ file_node: impl Into<NodePrefix>,
) -> Result<FilelogEntry, RevlogError> {
let file_rev = self.revlog.rev_from_node(file_node.into())?;
- self.data_for_rev(file_rev)
+ self.entry_for_rev(file_rev)
}
- /// The given revision is that of the file as found in a manifest, not of a
+ /// The given revision is that of the file as found in a filelog, not of a
/// changeset.
- pub fn data_for_rev(
+ pub fn entry_for_rev(
&self,
file_rev: Revision,
) -> Result<FilelogEntry, RevlogError> {
- let data: Vec<u8> = self.revlog.get_rev_data(file_rev)?;
- Ok(FilelogEntry(data.into()))
+ Ok(FilelogEntry(self.revlog.get_entry(file_rev)?))
}
}
@@ -50,9 +70,101 @@
get_path_from_bytes(&encoded_bytes).into()
}
-pub struct FilelogEntry(Vec<u8>);
+pub struct FilelogEntry<'a>(RevlogEntry<'a>);
+
+impl FilelogEntry<'_> {
+ /// `self.data()` can be expensive, with decompression and delta
+ /// resolution.
+ ///
+ /// *Without* paying this cost, based on revlog index information
+ /// including `RevlogEntry::uncompressed_len`:
+ ///
+ /// * Returns `true` if the length that `self.data().file_data().len()`
+ /// would return is definitely **not equal** to `other_len`.
+ /// * Returns `false` if available information is inconclusive.
+ pub fn file_data_len_not_equal_to(&self, other_len: u64) -> bool {
+ // Relevant code that implement this behavior in Python code:
+ // basefilectx.cmp, filelog.size, storageutil.filerevisioncopied,
+ // revlog.size, revlog.rawsize
+
+ // Let’s call `file_data_len` what would be returned by
+ // `self.data().file_data().len()`.
+
+ if self.0.is_cencored() {
+ let file_data_len = 0;
+ return other_len != file_data_len;
+ }
+
+ if self.0.has_length_affecting_flag_processor() {
+ // We can’t conclude anything about `file_data_len`.
+ return false;
+ }
-impl FilelogEntry {
+ // Revlog revisions (usually) have metadata for the size of
+ // their data after decompression and delta resolution
+ // as would be returned by `Revlog::get_rev_data`.
+ //
+ // For filelogs this is the file’s contents preceded by an optional
+ // metadata block.
+ let uncompressed_len = if let Some(l) = self.0.uncompressed_len() {
+ l as u64
+ } else {
+ // The field was set to -1, the actual uncompressed len is unknown.
+ // We need to decompress to say more.
+ return false;
+ };
+ // `uncompressed_len = file_data_len + optional_metadata_len`,
+ // so `file_data_len <= uncompressed_len`.
+ if uncompressed_len < other_len {
+ // Transitively, `file_data_len < other_len`.
+ // So `other_len != file_data_len` definitely.
+ return true;
+ }
+
+ if uncompressed_len == other_len + 4 {
+ // It’s possible that `file_data_len == other_len` with an empty
+ // metadata block (2 start marker bytes + 2 end marker bytes).
+ // This happens when there wouldn’t otherwise be metadata, but
+ // the first 2 bytes of file data happen to match a start marker
+ // and would be ambiguous.
+ return false;
+ }
+
+ if !self.0.has_p1() {
+ // There may or may not be copy metadata, so we can’t deduce more
+ // about `file_data_len` without computing file data.
+ return false;
+ }
+
+ // Filelog ancestry is not meaningful in the way changelog ancestry is.
+ // It only provides hints to delta generation.
+ // p1 and p2 are set to null when making a copy or rename since
+ // contents are likely unrelated to what might have previously existed
+ // at the destination path.
+ //
+ // Conversely, since here p1 is non-null, there is no copy metadata.
+ // Note that this reasoning may be invalidated in the presence of
+ // merges made by some previous versions of Mercurial that
+ // swapped p1 and p2. See <https://bz.mercurial-scm.org/show_bug.cgi?id=6528>
+ // and `tests/test-issue6528.t`.
+ //
+ // Since copy metadata is currently the only kind of metadata
+ // kept in revlog data of filelogs,
+ // this `FilelogEntry` does not have such metadata:
+ let file_data_len = uncompressed_len;
+
+ return file_data_len != other_len;
+ }
+
+ pub fn data(&self) -> Result<FilelogRevisionData, HgError> {
+ Ok(FilelogRevisionData(self.0.data()?.into_owned()))
+ }
+}
+
+/// The data for one revision in a filelog, uncompressed and delta-resolved.
+pub struct FilelogRevisionData(Vec<u8>);
+
+impl FilelogRevisionData {
/// Split into metadata and data
pub fn split(&self) -> Result<(Option<&[u8]>, &[u8]), HgError> {
const DELIMITER: &[u8; 2] = &[b'\x01', b'\n'];
@@ -71,14 +183,14 @@
}
/// Returns the file contents at this revision, stripped of any metadata
- pub fn data(&self) -> Result<&[u8], HgError> {
+ pub fn file_data(&self) -> Result<&[u8], HgError> {
let (_metadata, data) = self.split()?;
Ok(data)
}
/// Consume the entry, and convert it into data, discarding any metadata,
/// if present.
- pub fn into_data(self) -> Result<Vec<u8>, HgError> {
+ pub fn into_file_data(self) -> Result<Vec<u8>, HgError> {
if let (Some(_metadata), data) = self.split()? {
Ok(data.to_owned())
} else {
--- a/rust/hg-core/src/revlog/index.rs Fri Feb 18 12:55:39 2022 +0100
+++ b/rust/hg-core/src/revlog/index.rs Fri Feb 18 14:27:43 2022 +0100
@@ -9,12 +9,82 @@
pub const INDEX_ENTRY_SIZE: usize = 64;
+pub struct IndexHeader {
+ header_bytes: [u8; 4],
+}
+
+#[derive(Copy, Clone)]
+pub struct IndexHeaderFlags {
+ flags: u16,
+}
+
+/// Corresponds to the high bits of `_format_flags` in python
+impl IndexHeaderFlags {
+ /// Corresponds to FLAG_INLINE_DATA in python
+ pub fn is_inline(self) -> bool {
+ return self.flags & 1 != 0;
+ }
+ /// Corresponds to FLAG_GENERALDELTA in python
+ pub fn uses_generaldelta(self) -> bool {
+ return self.flags & 2 != 0;
+ }
+}
+
+/// Corresponds to the INDEX_HEADER structure,
+/// which is parsed as a `header` variable in `_loadindex` in `revlog.py`
+impl IndexHeader {
+ fn format_flags(&self) -> IndexHeaderFlags {
+ // No "unknown flags" check here, unlike in python. Maybe there should
+ // be.
+ return IndexHeaderFlags {
+ flags: BigEndian::read_u16(&self.header_bytes[0..2]),
+ };
+ }
+
+ /// The only revlog version currently supported by rhg.
+ const REVLOGV1: u16 = 1;
+
+ /// Corresponds to `_format_version` in Python.
+ fn format_version(&self) -> u16 {
+ return BigEndian::read_u16(&self.header_bytes[2..4]);
+ }
+
+ const EMPTY_INDEX_HEADER: IndexHeader = IndexHeader {
+ // We treat an empty file as a valid index with no entries.
+ // Here we make an arbitrary choice of what we assume the format of the
+ // index to be (V1, using generaldelta).
+ // This doesn't matter too much, since we're only doing read-only
+ // access. but the value corresponds to the `new_header` variable in
+ // `revlog.py`, `_loadindex`
+ header_bytes: [0, 3, 0, 1],
+ };
+
+ fn parse(index_bytes: &[u8]) -> Result<IndexHeader, HgError> {
+ if index_bytes.len() == 0 {
+ return Ok(IndexHeader::EMPTY_INDEX_HEADER);
+ }
+ if index_bytes.len() < 4 {
+ return Err(HgError::corrupted(
+ "corrupted revlog: can't read the index format header",
+ ));
+ }
+ return Ok(IndexHeader {
+ header_bytes: {
+ let bytes: [u8; 4] =
+ index_bytes[0..4].try_into().expect("impossible");
+ bytes
+ },
+ });
+ }
+}
+
/// A Revlog index
pub struct Index {
bytes: Box<dyn Deref<Target = [u8]> + Send>,
/// Offsets of starts of index blocks.
/// Only needed when the index is interleaved with data.
offsets: Option<Vec<usize>>,
+ uses_generaldelta: bool,
}
impl Index {
@@ -23,7 +93,20 @@
pub fn new(
bytes: Box<dyn Deref<Target = [u8]> + Send>,
) -> Result<Self, HgError> {
- if is_inline(&bytes) {
+ let header = IndexHeader::parse(bytes.as_ref())?;
+
+ if header.format_version() != IndexHeader::REVLOGV1 {
+ // A proper new version should have had a repo/store
+ // requirement.
+ return Err(HgError::corrupted("unsupported revlog version"));
+ }
+
+ // This is only correct because we know version is REVLOGV1.
+ // In v2 we always use generaldelta, while in v0 we never use
+ // generaldelta. Similar for [is_inline] (it's only used in v1).
+ let uses_generaldelta = header.format_flags().uses_generaldelta();
+
+ if header.format_flags().is_inline() {
let mut offset: usize = 0;
let mut offsets = Vec::new();
@@ -35,13 +118,14 @@
offset_override: None,
};
- offset += INDEX_ENTRY_SIZE + entry.compressed_len();
+ offset += INDEX_ENTRY_SIZE + entry.compressed_len() as usize;
}
if offset == bytes.len() {
Ok(Self {
bytes,
offsets: Some(offsets),
+ uses_generaldelta,
})
} else {
Err(HgError::corrupted("unexpected inline revlog length")
@@ -51,10 +135,15 @@
Ok(Self {
bytes,
offsets: None,
+ uses_generaldelta,
})
}
}
+ pub fn uses_generaldelta(&self) -> bool {
+ self.uses_generaldelta
+ }
+
/// Value of the inline flag.
pub fn is_inline(&self) -> bool {
self.offsets.is_some()
@@ -171,18 +260,22 @@
}
}
+ pub fn flags(&self) -> u16 {
+ BigEndian::read_u16(&self.bytes[6..=7])
+ }
+
/// Return the compressed length of the data.
- pub fn compressed_len(&self) -> usize {
- BigEndian::read_u32(&self.bytes[8..=11]) as usize
+ pub fn compressed_len(&self) -> u32 {
+ BigEndian::read_u32(&self.bytes[8..=11])
}
/// Return the uncompressed length of the data.
- pub fn uncompressed_len(&self) -> usize {
- BigEndian::read_u32(&self.bytes[12..=15]) as usize
+ pub fn uncompressed_len(&self) -> i32 {
+ BigEndian::read_i32(&self.bytes[12..=15])
}
/// Return the revision upon which the data has been derived.
- pub fn base_revision(&self) -> Revision {
+ pub fn base_revision_or_base_of_delta_chain(&self) -> Revision {
// TODO Maybe return an Option when base_revision == rev?
// Requires to add rev to IndexEntry
@@ -206,17 +299,6 @@
}
}
-/// Value of the inline flag.
-pub fn is_inline(index_bytes: &[u8]) -> bool {
- if index_bytes.len() < 4 {
- return true;
- }
- match &index_bytes[0..=1] {
- [0, 0] | [0, 2] => false,
- _ => true,
- }
-}
-
#[cfg(test)]
mod tests {
use super::*;
@@ -231,7 +313,7 @@
offset: usize,
compressed_len: usize,
uncompressed_len: usize,
- base_revision: Revision,
+ base_revision_or_base_of_delta_chain: Revision,
}
#[cfg(test)]
@@ -245,7 +327,7 @@
offset: 0,
compressed_len: 0,
uncompressed_len: 0,
- base_revision: 0,
+ base_revision_or_base_of_delta_chain: 0,
}
}
@@ -284,8 +366,11 @@
self
}
- pub fn with_base_revision(&mut self, value: Revision) -> &mut Self {
- self.base_revision = value;
+ pub fn with_base_revision_or_base_of_delta_chain(
+ &mut self,
+ value: Revision,
+ ) -> &mut Self {
+ self.base_revision_or_base_of_delta_chain = value;
self
}
@@ -308,42 +393,67 @@
bytes.extend(&[0u8; 2]); // Revision flags.
bytes.extend(&(self.compressed_len as u32).to_be_bytes());
bytes.extend(&(self.uncompressed_len as u32).to_be_bytes());
- bytes.extend(&self.base_revision.to_be_bytes());
+ bytes.extend(
+ &self.base_revision_or_base_of_delta_chain.to_be_bytes(),
+ );
bytes
}
}
+ pub fn is_inline(index_bytes: &[u8]) -> bool {
+ IndexHeader::parse(index_bytes)
+ .expect("too short")
+ .format_flags()
+ .is_inline()
+ }
+
+ pub fn uses_generaldelta(index_bytes: &[u8]) -> bool {
+ IndexHeader::parse(index_bytes)
+ .expect("too short")
+ .format_flags()
+ .uses_generaldelta()
+ }
+
+ pub fn get_version(index_bytes: &[u8]) -> u16 {
+ IndexHeader::parse(index_bytes)
+ .expect("too short")
+ .format_version()
+ }
+
#[test]
- fn is_not_inline_when_no_inline_flag_test() {
+ fn flags_when_no_inline_flag_test() {
let bytes = IndexEntryBuilder::new()
.is_first(true)
.with_general_delta(false)
.with_inline(false)
.build();
- assert_eq!(is_inline(&bytes), false)
+ assert_eq!(is_inline(&bytes), false);
+ assert_eq!(uses_generaldelta(&bytes), false);
}
#[test]
- fn is_inline_when_inline_flag_test() {
+ fn flags_when_inline_flag_test() {
let bytes = IndexEntryBuilder::new()
.is_first(true)
.with_general_delta(false)
.with_inline(true)
.build();
- assert_eq!(is_inline(&bytes), true)
+ assert_eq!(is_inline(&bytes), true);
+ assert_eq!(uses_generaldelta(&bytes), false);
}
#[test]
- fn is_inline_when_inline_and_generaldelta_flags_test() {
+ fn flags_when_inline_and_generaldelta_flags_test() {
let bytes = IndexEntryBuilder::new()
.is_first(true)
.with_general_delta(true)
.with_inline(true)
.build();
- assert_eq!(is_inline(&bytes), true)
+ assert_eq!(is_inline(&bytes), true);
+ assert_eq!(uses_generaldelta(&bytes), true);
}
#[test]
@@ -391,14 +501,26 @@
}
#[test]
- fn test_base_revision() {
- let bytes = IndexEntryBuilder::new().with_base_revision(1).build();
+ fn test_base_revision_or_base_of_delta_chain() {
+ let bytes = IndexEntryBuilder::new()
+ .with_base_revision_or_base_of_delta_chain(1)
+ .build();
let entry = IndexEntry {
bytes: &bytes,
offset_override: None,
};
- assert_eq!(entry.base_revision(), 1)
+ assert_eq!(entry.base_revision_or_base_of_delta_chain(), 1)
+ }
+
+ #[test]
+ fn version_test() {
+ let bytes = IndexEntryBuilder::new()
+ .is_first(true)
+ .with_version(1)
+ .build();
+
+ assert_eq!(get_version(&bytes), 1)
}
}
--- a/rust/hg-core/src/revlog/manifest.rs Fri Feb 18 12:55:39 2022 +0100
+++ b/rust/hg-core/src/revlog/manifest.rs Fri Feb 18 14:27:43 2022 +0100
@@ -4,6 +4,7 @@
use crate::revlog::Revision;
use crate::revlog::{Node, NodePrefix};
use crate::utils::hg_path::HgPath;
+use crate::utils::SliceExt;
/// A specialized `Revlog` to work with `manifest` data format.
pub struct Manifestlog {
@@ -43,7 +44,7 @@
&self,
rev: Revision,
) -> Result<Manifest, RevlogError> {
- let bytes = self.revlog.get_rev_data(rev)?;
+ let bytes = self.revlog.get_rev_data(rev)?.into_owned();
Ok(Manifest { bytes })
}
}
@@ -51,51 +52,142 @@
/// `Manifestlog` entry which knows how to interpret the `manifest` data bytes.
#[derive(Debug)]
pub struct Manifest {
+ /// Format for a manifest: flat sequence of variable-size entries,
+ /// sorted by path, each as:
+ ///
+ /// ```text
+ /// <path> \0 <hex_node_id> <flags> \n
+ /// ```
+ ///
+ /// The last entry is also terminated by a newline character.
+ /// Flags is one of `b""` (the empty string), `b"x"`, `b"l"`, or `b"t"`.
bytes: Vec<u8>,
}
impl Manifest {
- /// Return an iterator over the lines of the entry.
- pub fn lines(&self) -> impl Iterator<Item = &[u8]> {
+ pub fn iter(
+ &self,
+ ) -> impl Iterator<Item = Result<ManifestEntry, HgError>> {
self.bytes
.split(|b| b == &b'\n')
.filter(|line| !line.is_empty())
- }
-
- /// Return an iterator over the files of the entry.
- pub fn files(&self) -> impl Iterator<Item = &HgPath> {
- self.lines().filter(|line| !line.is_empty()).map(|line| {
- let pos = line
- .iter()
- .position(|x| x == &b'\0')
- .expect("manifest line should contain \\0");
- HgPath::new(&line[..pos])
- })
- }
-
- /// Return an iterator over the files of the entry.
- pub fn files_with_nodes(&self) -> impl Iterator<Item = (&HgPath, &[u8])> {
- self.lines().filter(|line| !line.is_empty()).map(|line| {
- let pos = line
- .iter()
- .position(|x| x == &b'\0')
- .expect("manifest line should contain \\0");
- let hash_start = pos + 1;
- let hash_end = hash_start + 40;
- (HgPath::new(&line[..pos]), &line[hash_start..hash_end])
- })
+ .map(ManifestEntry::from_raw)
}
/// If the given path is in this manifest, return its filelog node ID
- pub fn find_file(&self, path: &HgPath) -> Result<Option<Node>, HgError> {
- // TODO: use binary search instead of linear scan. This may involve
- // building (and caching) an index of the byte indicex of each manifest
- // line.
- for (manifest_path, node) in self.files_with_nodes() {
- if manifest_path == path {
- return Ok(Some(Node::from_hex_for_repo(node)?));
+ pub fn find_by_path(
+ &self,
+ path: &HgPath,
+ ) -> Result<Option<ManifestEntry>, HgError> {
+ use std::cmp::Ordering::*;
+ let path = path.as_bytes();
+ // Both boundaries of this `&[u8]` slice are always at the boundary of
+ // an entry
+ let mut bytes = &*self.bytes;
+
+ // Binary search algorithm derived from `[T]::binary_search_by`
+ // <https://github.com/rust-lang/rust/blob/1.57.0/library/core/src/slice/mod.rs#L2221>
+ // except we don’t have a slice of entries. Instead we jump to the
+ // middle of the byte slice and look around for entry delimiters
+ // (newlines).
+ while let Some(entry_range) = Self::find_entry_near_middle_of(bytes)? {
+ let (entry_path, rest) =
+ ManifestEntry::split_path(&bytes[entry_range.clone()])?;
+ let cmp = entry_path.cmp(path);
+ if cmp == Less {
+ let after_newline = entry_range.end + 1;
+ bytes = &bytes[after_newline..];
+ } else if cmp == Greater {
+ bytes = &bytes[..entry_range.start];
+ } else {
+ return Ok(Some(ManifestEntry::from_path_and_rest(
+ entry_path, rest,
+ )));
}
}
Ok(None)
}
+
+ /// If there is at least one, return the byte range of an entry *excluding*
+ /// the final newline.
+ fn find_entry_near_middle_of(
+ bytes: &[u8],
+ ) -> Result<Option<std::ops::Range<usize>>, HgError> {
+ let len = bytes.len();
+ if len > 0 {
+ let middle = bytes.len() / 2;
+ // Integer division rounds down, so `middle < len`.
+ let (before, after) = bytes.split_at(middle);
+ let is_newline = |&byte: &u8| byte == b'\n';
+ let entry_start = match before.iter().rposition(is_newline) {
+ Some(i) => i + 1,
+ None => 0, // We choose the first entry in `bytes`
+ };
+ let entry_end = match after.iter().position(is_newline) {
+ Some(i) => {
+ // No `+ 1` here to exclude this newline from the range
+ middle + i
+ }
+ None => {
+ // In a well-formed manifest:
+ //
+ // * Since `len > 0`, `bytes` contains at least one entry
+ // * Every entry ends with a newline
+ // * Since `middle < len`, `after` contains at least the
+ // newline at the end of the last entry of `bytes`.
+ //
+ // We didn’t find a newline, so this manifest is not
+ // well-formed.
+ return Err(HgError::corrupted(
+ "manifest entry without \\n delimiter",
+ ));
+ }
+ };
+ Ok(Some(entry_start..entry_end))
+ } else {
+ // len == 0
+ Ok(None)
+ }
+ }
}
+
+/// `Manifestlog` entry which knows how to interpret the `manifest` data bytes.
+#[derive(Debug)]
+pub struct ManifestEntry<'manifest> {
+ pub path: &'manifest HgPath,
+ pub hex_node_id: &'manifest [u8],
+
+    /// `Some` values are `b'x'`, `b'l'`, or `b't'`
+ pub flags: Option<u8>,
+}
+
+impl<'a> ManifestEntry<'a> {
+ fn split_path(bytes: &[u8]) -> Result<(&[u8], &[u8]), HgError> {
+ bytes.split_2(b'\0').ok_or_else(|| {
+ HgError::corrupted("manifest entry without \\0 delimiter")
+ })
+ }
+
+ fn from_path_and_rest(path: &'a [u8], rest: &'a [u8]) -> Self {
+ let (hex_node_id, flags) = match rest.split_last() {
+ Some((&b'x', rest)) => (rest, Some(b'x')),
+ Some((&b'l', rest)) => (rest, Some(b'l')),
+ Some((&b't', rest)) => (rest, Some(b't')),
+ _ => (rest, None),
+ };
+ Self {
+ path: HgPath::new(path),
+ hex_node_id,
+ flags,
+ }
+ }
+
+ fn from_raw(bytes: &'a [u8]) -> Result<Self, HgError> {
+ let (path, rest) = Self::split_path(bytes)?;
+ Ok(Self::from_path_and_rest(path, rest))
+ }
+
+ pub fn node_id(&self) -> Result<Node, HgError> {
+ Node::from_hex_for_repo(self.hex_node_id)
+ }
+}
--- a/rust/hg-core/src/revlog/node.rs Fri Feb 18 12:55:39 2022 +0100
+++ b/rust/hg-core/src/revlog/node.rs Fri Feb 18 14:27:43 2022 +0100
@@ -174,6 +174,12 @@
data: self.data,
}
}
+
+ pub fn pad_to_256_bits(&self) -> [u8; 32] {
+ let mut bits = [0; 32];
+ bits[..NODE_BYTES_LENGTH].copy_from_slice(&self.data);
+ bits
+ }
}
/// The beginning of a binary revision SHA.
--- a/rust/hg-core/src/revlog/revlog.rs Fri Feb 18 12:55:39 2022 +0100
+++ b/rust/hg-core/src/revlog/revlog.rs Fri Feb 18 14:27:43 2022 +0100
@@ -1,9 +1,9 @@
use std::borrow::Cow;
+use std::convert::TryFrom;
use std::io::Read;
use std::ops::Deref;
use std::path::Path;
-use byteorder::{BigEndian, ByteOrder};
use flate2::read::ZlibDecoder;
use micro_timer::timed;
use sha1::{Digest, Sha1};
@@ -20,6 +20,18 @@
use crate::revlog::Revision;
use crate::{Node, NULL_REVISION};
+const REVISION_FLAG_CENSORED: u16 = 1 << 15;
+const REVISION_FLAG_ELLIPSIS: u16 = 1 << 14;
+const REVISION_FLAG_EXTSTORED: u16 = 1 << 13;
+const REVISION_FLAG_HASCOPIESINFO: u16 = 1 << 12;
+
+// Keep this in sync with REVIDX_KNOWN_FLAGS in
+// mercurial/revlogutils/flagutil.py
+const REVIDX_KNOWN_FLAGS: u16 = REVISION_FLAG_CENSORED
+ | REVISION_FLAG_ELLIPSIS
+ | REVISION_FLAG_EXTSTORED
+ | REVISION_FLAG_HASCOPIESINFO;
+
#[derive(derive_more::From)]
pub enum RevlogError {
InvalidRevision,
@@ -40,9 +52,13 @@
}
}
+fn corrupted() -> HgError {
+ HgError::corrupted("corrupted revlog")
+}
+
impl RevlogError {
fn corrupted() -> Self {
- RevlogError::Other(HgError::corrupted("corrupted revlog"))
+ RevlogError::Other(corrupted())
}
}
@@ -74,13 +90,6 @@
match repo.store_vfs().mmap_open_opt(&index_path)? {
None => Index::new(Box::new(vec![])),
Some(index_mmap) => {
- let version = get_version(&index_mmap)?;
- if version != 1 {
- // A proper new version should have had a repo/store
- // requirement.
- return Err(HgError::corrupted("corrupted revlog"));
- }
-
let index = Index::new(Box::new(index_mmap))?;
Ok(index)
}
@@ -192,42 +201,14 @@
/// retrieved as needed, and the deltas will be applied to the inital
/// snapshot to rebuild the final data.
#[timed]
- pub fn get_rev_data(&self, rev: Revision) -> Result<Vec<u8>, RevlogError> {
+ pub fn get_rev_data(
+ &self,
+ rev: Revision,
+ ) -> Result<Cow<[u8]>, RevlogError> {
if rev == NULL_REVISION {
- return Ok(vec![]);
+ return Ok(Cow::Borrowed(&[]));
};
- // Todo return -> Cow
- let mut entry = self.get_entry(rev)?;
- let mut delta_chain = vec![];
- while let Some(base_rev) = entry.base_rev {
- delta_chain.push(entry);
- entry = self
- .get_entry(base_rev)
- .map_err(|_| RevlogError::corrupted())?;
- }
-
- // TODO do not look twice in the index
- let index_entry = self
- .index
- .get_entry(rev)
- .ok_or(RevlogError::InvalidRevision)?;
-
- let data: Vec<u8> = if delta_chain.is_empty() {
- entry.data()?.into()
- } else {
- Revlog::build_data_from_deltas(entry, &delta_chain)?
- };
-
- if self.check_hash(
- index_entry.p1(),
- index_entry.p2(),
- index_entry.hash().as_bytes(),
- &data,
- ) {
- Ok(data)
- } else {
- Err(RevlogError::corrupted())
- }
+ Ok(self.get_entry(rev)?.data()?)
}
/// Check the hash of some given data against the recorded hash.
@@ -258,13 +239,13 @@
fn build_data_from_deltas(
snapshot: RevlogEntry,
deltas: &[RevlogEntry],
- ) -> Result<Vec<u8>, RevlogError> {
- let snapshot = snapshot.data()?;
+ ) -> Result<Vec<u8>, HgError> {
+ let snapshot = snapshot.data_chunk()?;
let deltas = deltas
.iter()
.rev()
- .map(RevlogEntry::data)
- .collect::<Result<Vec<Cow<'_, [u8]>>, RevlogError>>()?;
+ .map(RevlogEntry::data_chunk)
+ .collect::<Result<Vec<_>, _>>()?;
let patches: Vec<_> =
deltas.iter().map(|d| patch::PatchList::new(d)).collect();
let patch = patch::fold_patch_lists(&patches);
@@ -282,42 +263,67 @@
}
/// Get an entry of the revlog.
- fn get_entry(&self, rev: Revision) -> Result<RevlogEntry, RevlogError> {
+ pub fn get_entry(
+ &self,
+ rev: Revision,
+ ) -> Result<RevlogEntry, RevlogError> {
let index_entry = self
.index
.get_entry(rev)
.ok_or(RevlogError::InvalidRevision)?;
let start = index_entry.offset();
- let end = start + index_entry.compressed_len();
+ let end = start + index_entry.compressed_len() as usize;
let data = if self.index.is_inline() {
self.index.data(start, end)
} else {
&self.data()[start..end]
};
let entry = RevlogEntry {
+ revlog: self,
rev,
bytes: data,
compressed_len: index_entry.compressed_len(),
uncompressed_len: index_entry.uncompressed_len(),
- base_rev: if index_entry.base_revision() == rev {
+ base_rev_or_base_of_delta_chain: if index_entry
+ .base_revision_or_base_of_delta_chain()
+ == rev
+ {
None
} else {
- Some(index_entry.base_revision())
+ Some(index_entry.base_revision_or_base_of_delta_chain())
},
+ p1: index_entry.p1(),
+ p2: index_entry.p2(),
+ flags: index_entry.flags(),
+ hash: *index_entry.hash(),
};
Ok(entry)
}
+
+ /// when resolving internal references within revlog, any errors
+ /// should be reported as corruption, instead of e.g. "invalid revision"
+ fn get_entry_internal(
+ &self,
+ rev: Revision,
+ ) -> Result<RevlogEntry, HgError> {
+ return self.get_entry(rev).map_err(|_| corrupted());
+ }
}
/// The revlog entry's bytes and the necessary informations to extract
/// the entry's data.
-#[derive(Debug)]
+#[derive(Clone)]
pub struct RevlogEntry<'a> {
+ revlog: &'a Revlog,
rev: Revision,
bytes: &'a [u8],
- compressed_len: usize,
- uncompressed_len: usize,
- base_rev: Option<Revision>,
+ compressed_len: u32,
+ uncompressed_len: i32,
+ base_rev_or_base_of_delta_chain: Option<Revision>,
+ p1: Revision,
+ p2: Revision,
+ flags: u16,
+ hash: Node,
}
impl<'a> RevlogEntry<'a> {
@@ -325,8 +331,65 @@
self.rev
}
+ pub fn uncompressed_len(&self) -> Option<u32> {
+ u32::try_from(self.uncompressed_len).ok()
+ }
+
+ pub fn has_p1(&self) -> bool {
+ self.p1 != NULL_REVISION
+ }
+
+ pub fn is_cencored(&self) -> bool {
+ (self.flags & REVISION_FLAG_CENSORED) != 0
+ }
+
+ pub fn has_length_affecting_flag_processor(&self) -> bool {
+ // Relevant Python code: revlog.size()
+ // note: ELLIPSIS is known to not change the content
+ (self.flags & (REVIDX_KNOWN_FLAGS ^ REVISION_FLAG_ELLIPSIS)) != 0
+ }
+
+ /// The data for this entry, after resolving deltas if any.
+ pub fn data(&self) -> Result<Cow<'a, [u8]>, HgError> {
+ let mut entry = self.clone();
+ let mut delta_chain = vec![];
+
+ // The meaning of `base_rev_or_base_of_delta_chain` depends on
+ // generaldelta. See the doc on `ENTRY_DELTA_BASE` in
+ // `mercurial/revlogutils/constants.py` and the code in
+ // [_chaininfo] and in [index_deltachain].
+ let uses_generaldelta = self.revlog.index.uses_generaldelta();
+ while let Some(base_rev) = entry.base_rev_or_base_of_delta_chain {
+ let base_rev = if uses_generaldelta {
+ base_rev
+ } else {
+ entry.rev - 1
+ };
+ delta_chain.push(entry);
+ entry = self.revlog.get_entry_internal(base_rev)?;
+ }
+
+ let data = if delta_chain.is_empty() {
+ entry.data_chunk()?
+ } else {
+ Revlog::build_data_from_deltas(entry, &delta_chain)?.into()
+ };
+
+ if self.revlog.check_hash(
+ self.p1,
+ self.p2,
+ self.hash.as_bytes(),
+ &data,
+ ) {
+ Ok(data)
+ } else {
+ Err(corrupted())
+ }
+ }
+
/// Extract the data contained in the entry.
- pub fn data(&self) -> Result<Cow<'_, [u8]>, RevlogError> {
+ /// This may be a delta. (See `is_delta`.)
+ fn data_chunk(&self) -> Result<Cow<'a, [u8]>, HgError> {
if self.bytes.is_empty() {
return Ok(Cow::Borrowed(&[]));
}
@@ -341,39 +404,37 @@
// zstd data.
b'\x28' => Ok(Cow::Owned(self.uncompressed_zstd_data()?)),
// A proper new format should have had a repo/store requirement.
- _format_type => Err(RevlogError::corrupted()),
+ _format_type => Err(corrupted()),
}
}
- fn uncompressed_zlib_data(&self) -> Result<Vec<u8>, RevlogError> {
+ fn uncompressed_zlib_data(&self) -> Result<Vec<u8>, HgError> {
let mut decoder = ZlibDecoder::new(self.bytes);
if self.is_delta() {
- let mut buf = Vec::with_capacity(self.compressed_len);
- decoder
- .read_to_end(&mut buf)
- .map_err(|_| RevlogError::corrupted())?;
+ let mut buf = Vec::with_capacity(self.compressed_len as usize);
+ decoder.read_to_end(&mut buf).map_err(|_| corrupted())?;
Ok(buf)
} else {
- let mut buf = vec![0; self.uncompressed_len];
- decoder
- .read_exact(&mut buf)
- .map_err(|_| RevlogError::corrupted())?;
+ let cap = self.uncompressed_len.max(0) as usize;
+ let mut buf = vec![0; cap];
+ decoder.read_exact(&mut buf).map_err(|_| corrupted())?;
Ok(buf)
}
}
- fn uncompressed_zstd_data(&self) -> Result<Vec<u8>, RevlogError> {
+ fn uncompressed_zstd_data(&self) -> Result<Vec<u8>, HgError> {
if self.is_delta() {
- let mut buf = Vec::with_capacity(self.compressed_len);
+ let mut buf = Vec::with_capacity(self.compressed_len as usize);
zstd::stream::copy_decode(self.bytes, &mut buf)
- .map_err(|_| RevlogError::corrupted())?;
+ .map_err(|_| corrupted())?;
Ok(buf)
} else {
- let mut buf = vec![0; self.uncompressed_len];
+ let cap = self.uncompressed_len.max(0) as usize;
+ let mut buf = vec![0; cap];
let len = zstd::block::decompress_to_buffer(self.bytes, &mut buf)
- .map_err(|_| RevlogError::corrupted())?;
- if len != self.uncompressed_len {
- Err(RevlogError::corrupted())
+ .map_err(|_| corrupted())?;
+ if len != self.uncompressed_len as usize {
+ Err(corrupted())
} else {
Ok(buf)
}
@@ -383,23 +444,10 @@
/// Tell if the entry is a snapshot or a delta
/// (influences on decompression).
fn is_delta(&self) -> bool {
- self.base_rev.is_some()
+ self.base_rev_or_base_of_delta_chain.is_some()
}
}
-/// Format version of the revlog.
-pub fn get_version(index_bytes: &[u8]) -> Result<u16, HgError> {
- if index_bytes.len() == 0 {
- return Ok(1);
- };
- if index_bytes.len() < 4 {
- return Err(HgError::corrupted(
- "corrupted revlog: can't read the index format header",
- ));
- };
- Ok(BigEndian::read_u16(&index_bytes[2..=3]))
-}
-
/// Calculate the hash of a revision given its data and its parents.
fn hash(
data: &[u8],
@@ -418,20 +466,3 @@
hasher.update(data);
*hasher.finalize().as_ref()
}
-
-#[cfg(test)]
-mod tests {
- use super::*;
-
- use super::super::index::IndexEntryBuilder;
-
- #[test]
- fn version_test() {
- let bytes = IndexEntryBuilder::new()
- .is_first(true)
- .with_version(1)
- .build();
-
- assert_eq!(get_version(&bytes).map_err(|_err| ()), Ok(1))
- }
-}
--- a/rust/hg-core/src/utils.rs Fri Feb 18 12:55:39 2022 +0100
+++ b/rust/hg-core/src/utils.rs Fri Feb 18 14:27:43 2022 +0100
@@ -145,6 +145,21 @@
}
}
+pub trait StrExt {
+ // TODO: Use https://doc.rust-lang.org/nightly/std/primitive.str.html#method.split_once
+ // once we require Rust 1.52+
+ fn split_2(&self, separator: char) -> Option<(&str, &str)>;
+}
+
+impl StrExt for str {
+ fn split_2(&self, separator: char) -> Option<(&str, &str)> {
+ let mut iter = self.splitn(2, separator);
+ let a = iter.next()?;
+ let b = iter.next()?;
+ Some((a, b))
+ }
+}
+
pub trait Escaped {
/// Return bytes escaped for display to the user
fn escaped_bytes(&self) -> Vec<u8>;
--- a/rust/hg-core/src/vfs.rs Fri Feb 18 12:55:39 2022 +0100
+++ b/rust/hg-core/src/vfs.rs Fri Feb 18 14:27:43 2022 +0100
@@ -1,6 +1,6 @@
use crate::errors::{HgError, IoErrorContext, IoResultExt};
use memmap2::{Mmap, MmapOptions};
-use std::io::ErrorKind;
+use std::io::{ErrorKind, Write};
use std::path::{Path, PathBuf};
/// Filesystem access abstraction for the contents of a given "base" diretory
@@ -16,6 +16,22 @@
self.base.join(relative_path)
}
+ pub fn symlink_metadata(
+ &self,
+ relative_path: impl AsRef<Path>,
+ ) -> Result<std::fs::Metadata, HgError> {
+ let path = self.join(relative_path);
+ std::fs::symlink_metadata(&path).when_reading_file(&path)
+ }
+
+ pub fn read_link(
+ &self,
+ relative_path: impl AsRef<Path>,
+ ) -> Result<PathBuf, HgError> {
+ let path = self.join(relative_path);
+ std::fs::read_link(&path).when_reading_file(&path)
+ }
+
pub fn read(
&self,
relative_path: impl AsRef<Path>,
@@ -71,6 +87,47 @@
std::fs::rename(&from, &to)
.with_context(|| IoErrorContext::RenamingFile { from, to })
}
+
+ pub fn remove_file(
+ &self,
+ relative_path: impl AsRef<Path>,
+ ) -> Result<(), HgError> {
+ let path = self.join(relative_path);
+ std::fs::remove_file(&path)
+ .with_context(|| IoErrorContext::RemovingFile(path))
+ }
+
+ #[cfg(unix)]
+ pub fn create_symlink(
+ &self,
+ relative_link_path: impl AsRef<Path>,
+ target_path: impl AsRef<Path>,
+ ) -> Result<(), HgError> {
+ let link_path = self.join(relative_link_path);
+ std::os::unix::fs::symlink(target_path, &link_path)
+ .when_writing_file(&link_path)
+ }
+
+ /// Write `contents` into a temporary file, then rename to `relative_path`.
+ /// This makes writing to a file "atomic": a reader opening that path will
+ /// see either the previous contents of the file or the complete new
+ /// content, never a partial write.
+ pub fn atomic_write(
+ &self,
+ relative_path: impl AsRef<Path>,
+ contents: &[u8],
+ ) -> Result<(), HgError> {
+ let mut tmp = tempfile::NamedTempFile::new_in(self.base)
+ .when_writing_file(self.base)?;
+ tmp.write_all(contents)
+ .and_then(|()| tmp.flush())
+ .when_writing_file(tmp.path())?;
+ let path = self.join(relative_path);
+ tmp.persist(&path)
+ .map_err(|e| e.error)
+ .when_writing_file(&path)?;
+ Ok(())
+ }
}
fn fs_metadata(
--- a/rust/hg-core/tests/test_missing_ancestors.rs Fri Feb 18 12:55:39 2022 +0100
+++ b/rust/hg-core/tests/test_missing_ancestors.rs Fri Feb 18 14:27:43 2022 +0100
@@ -32,11 +32,11 @@
if i == 2 || rng.gen_bool(prevprob) {
(i - 1) as Revision
} else {
- rng.gen_range(0, i - 1) as Revision
+ rng.gen_range(0..i - 1) as Revision
}
};
// p2 is a random revision lower than i and different from p1
- let mut p2 = rng.gen_range(0, i - 1) as Revision;
+ let mut p2 = rng.gen_range(0..i - 1) as Revision;
if p2 >= p1 {
p2 = p2 + 1;
}
@@ -44,7 +44,7 @@
} else if rng.gen_bool(prevprob) {
vg.push([(i - 1) as Revision, NULL_REVISION])
} else {
- vg.push([rng.gen_range(0, i - 1) as Revision, NULL_REVISION])
+ vg.push([rng.gen_range(0..i - 1) as Revision, NULL_REVISION])
}
}
vg
--- a/rust/hg-cpython/Cargo.toml Fri Feb 18 12:55:39 2022 +0100
+++ b/rust/hg-cpython/Cargo.toml Fri Feb 18 14:27:43 2022 +0100
@@ -28,3 +28,5 @@
log = "0.4.8"
env_logger = "0.7.1"
stable_deref_trait = "1.2.0"
+vcsgraph = "0.2.0"
+
--- a/rust/hg-cpython/src/ancestors.rs Fri Feb 18 12:55:39 2022 +0100
+++ b/rust/hg-cpython/src/ancestors.rs Fri Feb 18 14:27:43 2022 +0100
@@ -42,20 +42,21 @@
ObjectProtocol, PyClone, PyDict, PyList, PyModule, PyObject, PyResult,
Python, PythonObject, ToPyObject,
};
+use hg::MissingAncestors as CoreMissing;
use hg::Revision;
-use hg::{
- AncestorsIterator as CoreIterator, LazyAncestors as CoreLazy,
- MissingAncestors as CoreMissing,
-};
use std::cell::RefCell;
use std::collections::HashSet;
+use vcsgraph::lazy_ancestors::{
+ AncestorsIterator as VCGAncestorsIterator,
+ LazyAncestors as VCGLazyAncestors,
+};
py_class!(pub class AncestorsIterator |py| {
- data inner: RefCell<Box<CoreIterator<Index>>>;
+ data inner: RefCell<Box<VCGAncestorsIterator<Index>>>;
def __next__(&self) -> PyResult<Option<Revision>> {
match self.inner(py).borrow_mut().next() {
- Some(Err(e)) => Err(GraphError::pynew(py, e)),
+ Some(Err(e)) => Err(GraphError::pynew_from_vcsgraph(py, e)),
None => Ok(None),
Some(Ok(r)) => Ok(Some(r)),
}
@@ -63,7 +64,7 @@
def __contains__(&self, rev: Revision) -> PyResult<bool> {
self.inner(py).borrow_mut().contains(rev)
- .map_err(|e| GraphError::pynew(py, e))
+ .map_err(|e| GraphError::pynew_from_vcsgraph(py, e))
}
def __iter__(&self) -> PyResult<Self> {
@@ -73,32 +74,35 @@
def __new__(_cls, index: PyObject, initrevs: PyObject, stoprev: Revision,
inclusive: bool) -> PyResult<AncestorsIterator> {
let initvec: Vec<Revision> = rev_pyiter_collect(py, &initrevs)?;
- let ait = CoreIterator::new(
+ let ait = VCGAncestorsIterator::new(
pyindex_to_graph(py, index)?,
initvec,
stoprev,
inclusive,
)
- .map_err(|e| GraphError::pynew(py, e))?;
+ .map_err(|e| GraphError::pynew_from_vcsgraph(py, e))?;
AncestorsIterator::from_inner(py, ait)
}
});
impl AncestorsIterator {
- pub fn from_inner(py: Python, ait: CoreIterator<Index>) -> PyResult<Self> {
+ pub fn from_inner(
+ py: Python,
+ ait: VCGAncestorsIterator<Index>,
+ ) -> PyResult<Self> {
Self::create_instance(py, RefCell::new(Box::new(ait)))
}
}
py_class!(pub class LazyAncestors |py| {
- data inner: RefCell<Box<CoreLazy<Index>>>;
+ data inner: RefCell<Box<VCGLazyAncestors<Index>>>;
def __contains__(&self, rev: Revision) -> PyResult<bool> {
self.inner(py)
.borrow_mut()
.contains(rev)
- .map_err(|e| GraphError::pynew(py, e))
+ .map_err(|e| GraphError::pynew_from_vcsgraph(py, e))
}
def __iter__(&self) -> PyResult<AncestorsIterator> {
@@ -114,9 +118,9 @@
let initvec: Vec<Revision> = rev_pyiter_collect(py, &initrevs)?;
let lazy =
- CoreLazy::new(pyindex_to_graph(py, index)?,
+ VCGLazyAncestors::new(pyindex_to_graph(py, index)?,
initvec, stoprev, inclusive)
- .map_err(|e| GraphError::pynew(py, e))?;
+ .map_err(|e| GraphError::pynew_from_vcsgraph(py, e))?;
Self::create_instance(py, RefCell::new(Box::new(lazy)))
}
--- a/rust/hg-cpython/src/cindex.rs Fri Feb 18 12:55:39 2022 +0100
+++ b/rust/hg-cpython/src/cindex.rs Fri Feb 18 14:27:43 2022 +0100
@@ -155,6 +155,24 @@
}
}
+impl vcsgraph::graph::Graph for Index {
+ fn parents(
+ &self,
+ rev: Revision,
+ ) -> Result<vcsgraph::graph::Parents, vcsgraph::graph::GraphReadError>
+ {
+ match Graph::parents(self, rev) {
+ Ok(parents) => Ok(vcsgraph::graph::Parents(parents)),
+ Err(GraphError::ParentOutOfRange(rev)) => {
+ Err(vcsgraph::graph::GraphReadError::KeyedInvalidKey(rev))
+ }
+ Err(GraphError::WorkingDirectoryUnsupported) => Err(
+ vcsgraph::graph::GraphReadError::WorkingDirectoryUnsupported,
+ ),
+ }
+ }
+}
+
impl RevlogIndex for Index {
/// Note C return type is Py_ssize_t (hence signed), but we shall
/// force it to unsigned, because it's a length
--- a/rust/hg-cpython/src/dirstate.rs Fri Feb 18 12:55:39 2022 +0100
+++ b/rust/hg-cpython/src/dirstate.rs Fri Feb 18 14:27:43 2022 +0100
@@ -54,7 +54,6 @@
matcher: PyObject,
ignorefiles: PyList,
check_exec: bool,
- last_normal_time: (u32, u32),
list_clean: bool,
list_ignored: bool,
list_unknown: bool,
--- a/rust/hg-cpython/src/dirstate/dirstate_map.rs Fri Feb 18 12:55:39 2022 +0100
+++ b/rust/hg-cpython/src/dirstate/dirstate_map.rs Fri Feb 18 14:27:43 2022 +0100
@@ -18,7 +18,7 @@
use crate::{
dirstate::copymap::{CopyMap, CopyMapItemsIterator, CopyMapKeysIterator},
- dirstate::item::{timestamp, DirstateItem},
+ dirstate::item::DirstateItem,
pybytes_deref::PyBytesDeref,
};
use hg::{
@@ -194,16 +194,13 @@
&self,
p1: PyObject,
p2: PyObject,
- now: (u32, u32)
) -> PyResult<PyBytes> {
- let now = timestamp(py, now)?;
-
- let mut inner = self.inner(py).borrow_mut();
+ let inner = self.inner(py).borrow();
let parents = DirstateParents {
p1: extract_node_id(py, &p1)?,
p2: extract_node_id(py, &p2)?,
};
- let result = inner.pack_v1(parents, now);
+ let result = inner.pack_v1(parents);
match result {
Ok(packed) => Ok(PyBytes::new(py, &packed)),
Err(_) => Err(PyErr::new::<exc::OSError, _>(
@@ -218,17 +215,14 @@
/// instead of written to a new data file (False).
def write_v2(
&self,
- now: (u32, u32),
can_append: bool,
) -> PyResult<PyObject> {
- let now = timestamp(py, now)?;
-
- let mut inner = self.inner(py).borrow_mut();
- let result = inner.pack_v2(now, can_append);
+ let inner = self.inner(py).borrow();
+ let result = inner.pack_v2(can_append);
match result {
Ok((packed, tree_metadata, append)) => {
let packed = PyBytes::new(py, &packed);
- let tree_metadata = PyBytes::new(py, &tree_metadata);
+ let tree_metadata = PyBytes::new(py, tree_metadata.as_bytes());
let tuple = (packed, tree_metadata, append);
Ok(tuple.to_py_object(py).into_object())
},
--- a/rust/hg-cpython/src/dirstate/item.rs Fri Feb 18 12:55:39 2022 +0100
+++ b/rust/hg-cpython/src/dirstate/item.rs Fri Feb 18 14:27:43 2022 +0100
@@ -23,7 +23,7 @@
p2_info: bool = false,
has_meaningful_data: bool = true,
has_meaningful_mtime: bool = true,
- parentfiledata: Option<(u32, u32, (u32, u32))> = None,
+ parentfiledata: Option<(u32, u32, Option<(u32, u32, bool)>)> = None,
fallback_exec: Option<bool> = None,
fallback_symlink: Option<bool> = None,
@@ -35,7 +35,9 @@
mode_size_opt = Some((mode, size))
}
if has_meaningful_mtime {
- mtime_opt = Some(timestamp(py, mtime)?)
+ if let Some(m) = mtime {
+ mtime_opt = Some(timestamp(py, m)?);
+ }
}
}
let entry = DirstateEntry::from_v2_data(
@@ -192,12 +194,8 @@
Ok(mtime)
}
- def need_delay(&self, now: (u32, u32)) -> PyResult<bool> {
- let now = timestamp(py, now)?;
- Ok(self.entry(py).get().need_delay(now))
- }
-
- def mtime_likely_equal_to(&self, other: (u32, u32)) -> PyResult<bool> {
+ def mtime_likely_equal_to(&self, other: (u32, u32, bool))
+ -> PyResult<bool> {
if let Some(mtime) = self.entry(py).get().truncated_mtime() {
Ok(mtime.likely_equal(timestamp(py, other)?))
} else {
@@ -230,7 +228,7 @@
&self,
mode: u32,
size: u32,
- mtime: (u32, u32),
+ mtime: (u32, u32, bool),
) -> PyResult<PyNone> {
let mtime = timestamp(py, mtime)?;
self.update(py, |entry| entry.set_clean(mode, size, mtime));
@@ -275,12 +273,13 @@
pub(crate) fn timestamp(
py: Python<'_>,
- (s, ns): (u32, u32),
+ (s, ns, second_ambiguous): (u32, u32, bool),
) -> PyResult<TruncatedTimestamp> {
- TruncatedTimestamp::from_already_truncated(s, ns).map_err(|_| {
- PyErr::new::<exc::ValueError, _>(
- py,
- "expected mtime truncated to 31 bits",
- )
- })
+ TruncatedTimestamp::from_already_truncated(s, ns, second_ambiguous)
+ .map_err(|_| {
+ PyErr::new::<exc::ValueError, _>(
+ py,
+ "expected mtime truncated to 31 bits",
+ )
+ })
}
--- a/rust/hg-cpython/src/dirstate/status.rs Fri Feb 18 12:55:39 2022 +0100
+++ b/rust/hg-cpython/src/dirstate/status.rs Fri Feb 18 14:27:43 2022 +0100
@@ -9,13 +9,12 @@
//! `hg-core` crate. From Python, this will be seen as
//! `rustext.dirstate.status`.
-use crate::dirstate::item::timestamp;
use crate::{dirstate::DirstateMap, exceptions::FallbackError};
-use cpython::exc::OSError;
use cpython::{
exc::ValueError, ObjectProtocol, PyBytes, PyErr, PyList, PyObject,
PyResult, PyTuple, Python, PythonObject, ToPyObject,
};
+use hg::dirstate::status::StatusPath;
use hg::{
matchers::{AlwaysMatcher, FileMatcher, IncludeMatcher},
parse_pattern_syntax,
@@ -28,15 +27,19 @@
};
use std::borrow::Borrow;
+fn collect_status_path_list(py: Python, paths: &[StatusPath<'_>]) -> PyList {
+ collect_pybytes_list(py, paths.iter().map(|item| &*item.path))
+}
+
/// This will be useless once trait impls for collection are added to `PyBytes`
/// upstream.
fn collect_pybytes_list(
py: Python,
- collection: &[impl AsRef<HgPath>],
+ iter: impl Iterator<Item = impl AsRef<HgPath>>,
) -> PyList {
let list = PyList::new(py, &[]);
- for path in collection.iter() {
+ for path in iter {
list.append(
py,
PyBytes::new(py, path.as_ref().as_bytes()).into_object(),
@@ -91,7 +94,6 @@
PyErr::new::<FallbackError, _>(py, &as_string)
}
- StatusError::IO(e) => PyErr::new::<OSError, _>(py, e.to_string()),
e => PyErr::new::<ValueError, _>(py, e.to_string()),
}
}
@@ -103,13 +105,11 @@
root_dir: PyObject,
ignore_files: PyList,
check_exec: bool,
- last_normal_time: (u32, u32),
list_clean: bool,
list_ignored: bool,
list_unknown: bool,
collect_traversed_dirs: bool,
) -> PyResult<PyTuple> {
- let last_normal_time = timestamp(py, last_normal_time)?;
let bytes = root_dir.extract::<PyBytes>(py)?;
let root_dir = get_path_from_bytes(bytes.data(py));
@@ -124,6 +124,8 @@
})
.collect();
let ignore_files = ignore_files?;
+ // The caller may call `copymap.items()` separately
+ let list_copies = false;
match matcher.get_type(py).name(py).borrow() {
"alwaysmatcher" => {
@@ -135,10 +137,10 @@
ignore_files,
StatusOptions {
check_exec,
- last_normal_time,
list_clean,
list_ignored,
list_unknown,
+ list_copies,
collect_traversed_dirs,
},
)
@@ -172,10 +174,10 @@
ignore_files,
StatusOptions {
check_exec,
- last_normal_time,
list_clean,
list_ignored,
list_unknown,
+ list_copies,
collect_traversed_dirs,
},
)
@@ -224,10 +226,10 @@
ignore_files,
StatusOptions {
check_exec,
- last_normal_time,
list_clean,
list_ignored,
list_unknown,
+ list_copies,
collect_traversed_dirs,
},
)
@@ -247,16 +249,16 @@
status_res: DirstateStatus,
warnings: Vec<PatternFileWarning>,
) -> PyResult<PyTuple> {
- let modified = collect_pybytes_list(py, status_res.modified.as_ref());
- let added = collect_pybytes_list(py, status_res.added.as_ref());
- let removed = collect_pybytes_list(py, status_res.removed.as_ref());
- let deleted = collect_pybytes_list(py, status_res.deleted.as_ref());
- let clean = collect_pybytes_list(py, status_res.clean.as_ref());
- let ignored = collect_pybytes_list(py, status_res.ignored.as_ref());
- let unknown = collect_pybytes_list(py, status_res.unknown.as_ref());
- let unsure = collect_pybytes_list(py, status_res.unsure.as_ref());
- let bad = collect_bad_matches(py, status_res.bad.as_ref())?;
- let traversed = collect_pybytes_list(py, status_res.traversed.as_ref());
+ let modified = collect_status_path_list(py, &status_res.modified);
+ let added = collect_status_path_list(py, &status_res.added);
+ let removed = collect_status_path_list(py, &status_res.removed);
+ let deleted = collect_status_path_list(py, &status_res.deleted);
+ let clean = collect_status_path_list(py, &status_res.clean);
+ let ignored = collect_status_path_list(py, &status_res.ignored);
+ let unknown = collect_status_path_list(py, &status_res.unknown);
+ let unsure = collect_status_path_list(py, &status_res.unsure);
+ let bad = collect_bad_matches(py, &status_res.bad)?;
+ let traversed = collect_pybytes_list(py, status_res.traversed.iter());
let dirty = status_res.dirty.to_py_object(py);
let py_warnings = PyList::new(py, &[]);
for warning in warnings.iter() {
--- a/rust/hg-cpython/src/exceptions.rs Fri Feb 18 12:55:39 2022 +0100
+++ b/rust/hg-cpython/src/exceptions.rs Fri Feb 18 14:27:43 2022 +0100
@@ -37,6 +37,32 @@
}
}
}
+
+ pub fn pynew_from_vcsgraph(
+ py: Python,
+ inner: vcsgraph::graph::GraphReadError,
+ ) -> PyErr {
+ match inner {
+ vcsgraph::graph::GraphReadError::InconsistentGraphData => {
+ GraphError::new(py, "InconsistentGraphData")
+ }
+ vcsgraph::graph::GraphReadError::InvalidKey => {
+ GraphError::new(py, "ParentOutOfRange")
+ }
+ vcsgraph::graph::GraphReadError::KeyedInvalidKey(r) => {
+ GraphError::new(py, ("ParentOutOfRange", r))
+ }
+ vcsgraph::graph::GraphReadError::WorkingDirectoryUnsupported => {
+ match py
+ .import("mercurial.error")
+ .and_then(|m| m.get(py, "WdirUnsupported"))
+ {
+ Err(e) => e,
+ Ok(cls) => PyErr::from_instance(py, cls),
+ }
+ }
+ }
+ }
}
py_exception!(rustext, HgPathPyError, RuntimeError);
--- a/rust/rhg/Cargo.toml Fri Feb 18 12:55:39 2022 +0100
+++ b/rust/rhg/Cargo.toml Fri Feb 18 14:27:43 2022 +0100
@@ -8,6 +8,7 @@
edition = "2018"
[dependencies]
+atty = "0.2"
hg-core = { path = "../hg-core"}
chrono = "0.4.19"
clap = "2.33.1"
@@ -18,5 +19,5 @@
micro-timer = "0.3.1"
regex = "1.3.9"
env_logger = "0.7.1"
-format-bytes = "0.2.1"
+format-bytes = "0.3.0"
users = "0.11.0"
--- a/rust/rhg/src/blackbox.rs Fri Feb 18 12:55:39 2022 +0100
+++ b/rust/rhg/src/blackbox.rs Fri Feb 18 14:27:43 2022 +0100
@@ -13,7 +13,7 @@
const DEFAULT_MAX_FILES: u32 = 7;
// Python does not support %.3f, only %f
-const DEFAULT_DATE_FORMAT: &str = "%Y/%m/%d %H:%M:%S%.3f";
+const DEFAULT_DATE_FORMAT: &str = "%Y-%m-%d %H:%M:%S%.3f";
type DateTime = chrono::DateTime<chrono::Local>;
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/rhg/src/color.rs Fri Feb 18 14:27:43 2022 +0100
@@ -0,0 +1,255 @@
+use crate::ui::formatted;
+use crate::ui::plain;
+use format_bytes::write_bytes;
+use hg::config::Config;
+use hg::config::ConfigOrigin;
+use hg::errors::HgError;
+use std::collections::HashMap;
+
+pub type Effect = u32;
+
+pub type EffectsMap = HashMap<Vec<u8>, Vec<Effect>>;
+
+macro_rules! effects {
+ ($( $name: ident: $value: expr ,)+) => {
+
+ #[allow(non_upper_case_globals)]
+ mod effects {
+ $(
+ pub const $name: super::Effect = $value;
+ )+
+ }
+
+ fn effect(name: &[u8]) -> Option<Effect> {
+ $(
+ if name == stringify!($name).as_bytes() {
+ Some(effects::$name)
+ } else
+ )+
+ {
+ None
+ }
+ }
+ };
+}
+
+effects! {
+ none: 0,
+ black: 30,
+ red: 31,
+ green: 32,
+ yellow: 33,
+ blue: 34,
+ magenta: 35,
+ cyan: 36,
+ white: 37,
+ bold: 1,
+ italic: 3,
+ underline: 4,
+ inverse: 7,
+ dim: 2,
+ black_background: 40,
+ red_background: 41,
+ green_background: 42,
+ yellow_background: 43,
+ blue_background: 44,
+ purple_background: 45,
+ cyan_background: 46,
+ white_background: 47,
+}
+
+macro_rules! default_styles {
+ ($( $key: expr => [$($value: expr),*],)+) => {
+ fn default_styles() -> EffectsMap {
+ use effects::*;
+ let mut map = HashMap::new();
+ $(
+ map.insert($key[..].to_owned(), vec![$( $value ),*]);
+ )+
+ map
+ }
+ };
+}
+
+default_styles! {
+ b"grep.match" => [red, bold],
+ b"grep.linenumber" => [green],
+ b"grep.rev" => [blue],
+ b"grep.sep" => [cyan],
+ b"grep.filename" => [magenta],
+ b"grep.user" => [magenta],
+ b"grep.date" => [magenta],
+ b"grep.inserted" => [green, bold],
+ b"grep.deleted" => [red, bold],
+ b"bookmarks.active" => [green],
+ b"branches.active" => [none],
+ b"branches.closed" => [black, bold],
+ b"branches.current" => [green],
+ b"branches.inactive" => [none],
+ b"diff.changed" => [white],
+ b"diff.deleted" => [red],
+ b"diff.deleted.changed" => [red, bold, underline],
+ b"diff.deleted.unchanged" => [red],
+ b"diff.diffline" => [bold],
+ b"diff.extended" => [cyan, bold],
+ b"diff.file_a" => [red, bold],
+ b"diff.file_b" => [green, bold],
+ b"diff.hunk" => [magenta],
+ b"diff.inserted" => [green],
+ b"diff.inserted.changed" => [green, bold, underline],
+ b"diff.inserted.unchanged" => [green],
+ b"diff.tab" => [],
+ b"diff.trailingwhitespace" => [bold, red_background],
+ b"changeset.public" => [],
+ b"changeset.draft" => [],
+ b"changeset.secret" => [],
+ b"diffstat.deleted" => [red],
+ b"diffstat.inserted" => [green],
+ b"formatvariant.name.mismatchconfig" => [red],
+ b"formatvariant.name.mismatchdefault" => [yellow],
+ b"formatvariant.name.uptodate" => [green],
+ b"formatvariant.repo.mismatchconfig" => [red],
+ b"formatvariant.repo.mismatchdefault" => [yellow],
+ b"formatvariant.repo.uptodate" => [green],
+ b"formatvariant.config.special" => [yellow],
+ b"formatvariant.config.default" => [green],
+ b"formatvariant.default" => [],
+ b"histedit.remaining" => [red, bold],
+ b"ui.addremove.added" => [green],
+ b"ui.addremove.removed" => [red],
+ b"ui.error" => [red],
+ b"ui.prompt" => [yellow],
+ b"log.changeset" => [yellow],
+ b"patchbomb.finalsummary" => [],
+ b"patchbomb.from" => [magenta],
+ b"patchbomb.to" => [cyan],
+ b"patchbomb.subject" => [green],
+ b"patchbomb.diffstats" => [],
+ b"rebase.rebased" => [blue],
+ b"rebase.remaining" => [red, bold],
+ b"resolve.resolved" => [green, bold],
+ b"resolve.unresolved" => [red, bold],
+ b"shelve.age" => [cyan],
+ b"shelve.newest" => [green, bold],
+ b"shelve.name" => [blue, bold],
+ b"status.added" => [green, bold],
+ b"status.clean" => [none],
+ b"status.copied" => [none],
+ b"status.deleted" => [cyan, bold, underline],
+ b"status.ignored" => [black, bold],
+ b"status.modified" => [blue, bold],
+ b"status.removed" => [red, bold],
+ b"status.unknown" => [magenta, bold, underline],
+ b"tags.normal" => [green],
+ b"tags.local" => [black, bold],
+ b"upgrade-repo.requirement.preserved" => [cyan],
+ b"upgrade-repo.requirement.added" => [green],
+ b"upgrade-repo.requirement.removed" => [red],
+}
+
+fn parse_effect(config_key: &[u8], effect_name: &[u8]) -> Option<Effect> {
+ let found = effect(effect_name);
+ if found.is_none() {
+ // TODO: have some API for warnings
+ // TODO: handle IO errors during warnings
+ let stderr = std::io::stderr();
+ let _ = write_bytes!(
+ &mut stderr.lock(),
+ b"ignoring unknown color/effect '{}' \
+ (configured in color.{})\n",
+ effect_name,
+ config_key,
+ );
+ }
+ found
+}
+
+fn effects_from_config(config: &Config) -> EffectsMap {
+ let mut styles = default_styles();
+ for (key, _value) in config.iter_section(b"color") {
+ if !key.contains(&b'.')
+ || key.starts_with(b"color.")
+ || key.starts_with(b"terminfo.")
+ {
+ continue;
+ }
+ // `unwrap` shouldn’t panic since we just got this key from
+ // iteration
+ let list = config.get_list(b"color", key).unwrap();
+ let parsed = list
+ .iter()
+ .filter_map(|name| parse_effect(key, name))
+ .collect();
+ styles.insert(key.to_owned(), parsed);
+ }
+ styles
+}
+
+enum ColorMode {
+ // TODO: support other modes
+ Ansi,
+}
+
+impl ColorMode {
+ // Similar to _modesetup in mercurial/color.py
+ fn get(config: &Config) -> Result<Option<Self>, HgError> {
+ if plain(Some("color")) {
+ return Ok(None);
+ }
+ let enabled_default = b"auto";
+ // `origin` is only used when `!auto`, so its default doesn’t matter
+ let (enabled, origin) = config
+ .get_with_origin(b"ui", b"color")
+ .unwrap_or((enabled_default, &ConfigOrigin::CommandLineColor));
+ if enabled == b"debug" {
+ return Err(HgError::unsupported("debug color mode"));
+ }
+ let auto = enabled == b"auto";
+ let always;
+ if !auto {
+ let enabled_bool = config.get_bool(b"ui", b"color")?;
+ if !enabled_bool {
+ return Ok(None);
+ }
+ always = enabled == b"always"
+ || *origin == ConfigOrigin::CommandLineColor
+ } else {
+ always = false
+ };
+ let formatted = always
+ || (std::env::var_os("TERM").unwrap_or_default() != "dumb"
+ && formatted(config)?);
+
+ let mode_default = b"auto";
+ let mode = config.get(b"color", b"mode").unwrap_or(mode_default);
+
+ if formatted {
+ match mode {
+ b"ansi" | b"auto" => Ok(Some(ColorMode::Ansi)),
+ // TODO: support other modes
+ _ => Err(HgError::UnsupportedFeature(format!(
+ "color mode {}",
+ String::from_utf8_lossy(mode)
+ ))),
+ }
+ } else {
+ Ok(None)
+ }
+ }
+}
+
+pub struct ColorConfig {
+ pub styles: EffectsMap,
+}
+
+impl ColorConfig {
+ // Similar to _modesetup in mercurial/color.py
+ pub fn new(config: &Config) -> Result<Option<Self>, HgError> {
+ Ok(match ColorMode::get(config)? {
+ None => None,
+ Some(ColorMode::Ansi) => Some(ColorConfig {
+ styles: effects_from_config(config),
+ }),
+ })
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/rhg/src/commands/debugignorerhg.rs Fri Feb 18 14:27:43 2022 +0100
@@ -0,0 +1,40 @@
+use crate::error::CommandError;
+use clap::SubCommand;
+use hg;
+use hg::matchers::get_ignore_matcher;
+use hg::StatusError;
+use log::warn;
+
+pub const HELP_TEXT: &str = "
+Show effective hgignore patterns used by rhg.
+
+This is a pure Rust version of `hg debugignore`.
+
+Some options might be missing, check the list below.
+";
+
+pub fn args() -> clap::App<'static, 'static> {
+ SubCommand::with_name("debugignorerhg").about(HELP_TEXT)
+}
+
+pub fn run(invocation: &crate::CliInvocation) -> Result<(), CommandError> {
+ let repo = invocation.repo?;
+
+ let ignore_file = repo.working_directory_vfs().join(".hgignore"); // TODO hardcoded
+
+ let (ignore_matcher, warnings) = get_ignore_matcher(
+ vec![ignore_file],
+ &repo.working_directory_path().to_owned(),
+ &mut |_pattern_bytes| (),
+ )
+ .map_err(|e| StatusError::from(e))?;
+
+ if !warnings.is_empty() {
+ warn!("Pattern warnings: {:?}", &warnings);
+ }
+
+ let patterns = ignore_matcher.debug_get_patterns();
+ invocation.ui.write_stdout(patterns)?;
+ invocation.ui.write_stdout(b"\n")?;
+ Ok(())
+}
--- a/rust/rhg/src/commands/files.rs Fri Feb 18 12:55:39 2022 +0100
+++ b/rust/rhg/src/commands/files.rs Fri Feb 18 14:27:43 2022 +0100
@@ -1,13 +1,12 @@
use crate::error::CommandError;
use crate::ui::Ui;
-use crate::ui::UiError;
-use crate::utils::path_utils::relativize_paths;
+use crate::utils::path_utils::RelativizePaths;
use clap::Arg;
+use hg::errors::HgError;
use hg::operations::list_rev_tracked_files;
use hg::operations::Dirstate;
use hg::repo::Repo;
use hg::utils::hg_path::HgPath;
-use std::borrow::Cow;
pub const HELP_TEXT: &str = "
List tracked files.
@@ -39,29 +38,60 @@
let rev = invocation.subcommand_args.value_of("rev");
let repo = invocation.repo?;
+
+ // It seems better if this check is removed: this would correspond to
+ // automatically enabling the extension if the repo requires it.
+ // However we need this check to be in sync with vanilla hg so hg tests
+ // pass.
+ if repo.has_sparse()
+ && invocation.config.get(b"extensions", b"sparse").is_none()
+ {
+ return Err(CommandError::unsupported(
+ "repo is using sparse, but sparse extension is not enabled",
+ ));
+ }
+
if let Some(rev) = rev {
+ if repo.has_narrow() {
+ return Err(CommandError::unsupported(
+ "rhg files -r <rev> is not supported in narrow clones",
+ ));
+ }
let files = list_rev_tracked_files(repo, rev).map_err(|e| (e, rev))?;
display_files(invocation.ui, repo, files.iter())
} else {
+ // The dirstate always reflects the sparse narrowspec, so if
+ // we only have sparse without narrow all is fine.
+ // If we have narrow, then [hg files] needs to check if
+ // the store narrowspec is in sync with the one of the dirstate,
+ // so we can't support that without explicit code.
+ if repo.has_narrow() {
+ return Err(CommandError::unsupported(
+ "rhg files is not supported in narrow clones",
+ ));
+ }
let distate = Dirstate::new(repo)?;
let files = distate.tracked_files()?;
- display_files(invocation.ui, repo, files)
+ display_files(invocation.ui, repo, files.into_iter().map(Ok))
}
}
fn display_files<'a>(
ui: &Ui,
repo: &Repo,
- files: impl IntoIterator<Item = &'a HgPath>,
+ files: impl IntoIterator<Item = Result<&'a HgPath, HgError>>,
) -> Result<(), CommandError> {
let mut stdout = ui.stdout_buffer();
let mut any = false;
- relativize_paths(repo, files, |path: Cow<[u8]>| -> Result<(), UiError> {
+ let relativize = RelativizePaths::new(repo)?;
+ for result in files {
+ let path = result?;
+ stdout.write_all(&relativize.relativize(path))?;
+ stdout.write_all(b"\n")?;
any = true;
- stdout.write_all(path.as_ref())?;
- stdout.write_all(b"\n")
- })?;
+ }
+
stdout.flush()?;
if any {
Ok(())
--- a/rust/rhg/src/commands/status.rs Fri Feb 18 12:55:39 2022 +0100
+++ b/rust/rhg/src/commands/status.rs Fri Feb 18 14:27:43 2022 +0100
@@ -6,20 +6,29 @@
// GNU General Public License version 2 or any later version.
use crate::error::CommandError;
-use crate::ui::{Ui, UiError};
-use crate::utils::path_utils::relativize_paths;
+use crate::ui::Ui;
+use crate::utils::path_utils::RelativizePaths;
use clap::{Arg, SubCommand};
+use format_bytes::format_bytes;
use hg;
use hg::config::Config;
+use hg::dirstate::has_exec_bit;
+use hg::dirstate::status::StatusPath;
use hg::dirstate::TruncatedTimestamp;
-use hg::errors::HgError;
+use hg::dirstate::RANGE_MASK_31BIT;
+use hg::errors::{HgError, IoResultExt};
+use hg::lock::LockError;
use hg::manifest::Manifest;
use hg::matchers::AlwaysMatcher;
use hg::repo::Repo;
-use hg::utils::hg_path::{hg_path_to_os_string, HgPath};
-use hg::{HgPathCow, StatusOptions};
-use log::{info, warn};
-use std::borrow::Cow;
+use hg::utils::files::get_bytes_from_os_string;
+use hg::utils::files::get_bytes_from_path;
+use hg::utils::files::get_path_from_bytes;
+use hg::utils::hg_path::{hg_path_to_path_buf, HgPath};
+use hg::StatusOptions;
+use log::info;
+use std::io;
+use std::path::PathBuf;
pub const HELP_TEXT: &str = "
Show changed files in the working directory
@@ -81,6 +90,18 @@
.short("-i")
.long("--ignored"),
)
+ .arg(
+ Arg::with_name("copies")
+ .help("show source of copied files (DEFAULT: ui.statuscopies)")
+ .short("-C")
+ .long("--copies"),
+ )
+ .arg(
+ Arg::with_name("no-status")
+ .help("hide status prefix")
+ .short("-n")
+ .long("--no-status"),
+ )
}
/// Pure data type allowing the caller to specify file states to display
@@ -128,31 +149,43 @@
}
pub fn run(invocation: &crate::CliInvocation) -> Result<(), CommandError> {
- let status_enabled_default = false;
- let status_enabled = invocation.config.get_option(b"rhg", b"status")?;
- if !status_enabled.unwrap_or(status_enabled_default) {
- return Err(CommandError::unsupported(
- "status is experimental in rhg (enable it with 'rhg.status = true' \
- or enable fallback with 'rhg.on-unsupported = fallback')"
- ));
- }
-
// TODO: lift these limitations
- if invocation.config.get_bool(b"ui", b"tweakdefaults").ok() == Some(true) {
+ if invocation.config.get_bool(b"ui", b"tweakdefaults")? {
return Err(CommandError::unsupported(
"ui.tweakdefaults is not yet supported with rhg status",
));
}
- if invocation.config.get_bool(b"ui", b"statuscopies").ok() == Some(true) {
+ if invocation.config.get_bool(b"ui", b"statuscopies")? {
return Err(CommandError::unsupported(
"ui.statuscopies is not yet supported with rhg status",
));
}
+ if invocation
+ .config
+ .get(b"commands", b"status.terse")
+ .is_some()
+ {
+ return Err(CommandError::unsupported(
+ "status.terse is not yet supported with rhg status",
+ ));
+ }
let ui = invocation.ui;
let config = invocation.config;
let args = invocation.subcommand_args;
- let display_states = if args.is_present("all") {
+
+ let verbose = !ui.plain(None)
+ && !args.is_present("print0")
+ && (config.get_bool(b"ui", b"verbose")?
+ || config.get_bool(b"commands", b"status.verbose")?);
+ if verbose {
+ return Err(CommandError::unsupported(
+ "verbose status is not supported yet",
+ ));
+ }
+
+ let all = args.is_present("all");
+ let display_states = if all {
// TODO when implementing `--quiet`: it excludes clean files
// from `--all`
ALL_DISPLAY_STATES
@@ -172,44 +205,84 @@
requested
}
};
+ let no_status = args.is_present("no-status");
+ let list_copies = all
+ || args.is_present("copies")
+ || config.get_bool(b"ui", b"statuscopies")?;
let repo = invocation.repo?;
+
+ if repo.has_sparse() || repo.has_narrow() {
+ return Err(CommandError::unsupported(
+ "rhg status is not supported for sparse checkouts or narrow clones yet"
+ ));
+ }
+
let mut dmap = repo.dirstate_map_mut()?;
let options = StatusOptions {
- // TODO should be provided by the dirstate parsing and
- // hence be stored on dmap. Using a value that assumes we aren't
- // below the time resolution granularity of the FS and the
- // dirstate.
- last_normal_time: TruncatedTimestamp::new_truncate(0, 0),
// we're currently supporting file systems with exec flags only
// anyway
check_exec: true,
list_clean: display_states.clean,
list_unknown: display_states.unknown,
list_ignored: display_states.ignored,
+ list_copies,
collect_traversed_dirs: false,
};
- let ignore_file = repo.working_directory_vfs().join(".hgignore"); // TODO hardcoded
let (mut ds_status, pattern_warnings) = dmap.status(
&AlwaysMatcher,
repo.working_directory_path().to_owned(),
- vec![ignore_file],
+ ignore_files(repo, config),
options,
)?;
- if !pattern_warnings.is_empty() {
- warn!("Pattern warnings: {:?}", &pattern_warnings);
+ for warning in pattern_warnings {
+ match warning {
+ hg::PatternFileWarning::InvalidSyntax(path, syntax) => ui
+ .write_stderr(&format_bytes!(
+ b"{}: ignoring invalid syntax '{}'\n",
+ get_bytes_from_path(path),
+ &*syntax
+ ))?,
+ hg::PatternFileWarning::NoSuchFile(path) => {
+ let path = if let Ok(relative) =
+ path.strip_prefix(repo.working_directory_path())
+ {
+ relative
+ } else {
+ &*path
+ };
+ ui.write_stderr(&format_bytes!(
+ b"skipping unreadable pattern file '{}': \
+ No such file or directory\n",
+ get_bytes_from_path(path),
+ ))?
+ }
+ }
}
- if !ds_status.bad.is_empty() {
- warn!("Bad matches {:?}", &(ds_status.bad))
+ for (path, error) in ds_status.bad {
+ let error = match error {
+ hg::BadMatch::OsError(code) => {
+ std::io::Error::from_raw_os_error(code).to_string()
+ }
+ hg::BadMatch::BadType(ty) => {
+ format!("unsupported file type (type is {})", ty)
+ }
+ };
+ ui.write_stderr(&format_bytes!(
+ b"{}: {}\n",
+ path.as_bytes(),
+ error.as_bytes()
+ ))?
}
if !ds_status.unsure.is_empty() {
info!(
"Files to be rechecked by retrieval from filelog: {:?}",
- &ds_status.unsure
+ ds_status.unsure.iter().map(|s| &s.path).collect::<Vec<_>>()
);
}
+ let mut fixup = Vec::new();
if !ds_status.unsure.is_empty()
&& (display_states.modified || display_states.clean)
{
@@ -218,99 +291,241 @@
CommandError::from((e, &*format!("{:x}", p1.short())))
})?;
for to_check in ds_status.unsure {
- if cat_file_is_modified(repo, &manifest, &to_check)? {
+ if unsure_is_modified(repo, &manifest, &to_check.path)? {
if display_states.modified {
ds_status.modified.push(to_check);
}
} else {
if display_states.clean {
- ds_status.clean.push(to_check);
+ ds_status.clean.push(to_check.clone());
}
+ fixup.push(to_check.path.into_owned())
}
}
}
+ let relative_paths = (!ui.plain(None))
+ && config
+ .get_option(b"commands", b"status.relative")?
+ .unwrap_or(config.get_bool(b"ui", b"relative-paths")?);
+ let output = DisplayStatusPaths {
+ ui,
+ no_status,
+ relativize: if relative_paths {
+ Some(RelativizePaths::new(repo)?)
+ } else {
+ None
+ },
+ };
if display_states.modified {
- display_status_paths(ui, repo, config, &mut ds_status.modified, b"M")?;
+ output.display(b"M ", "status.modified", ds_status.modified)?;
}
if display_states.added {
- display_status_paths(ui, repo, config, &mut ds_status.added, b"A")?;
+ output.display(b"A ", "status.added", ds_status.added)?;
}
if display_states.removed {
- display_status_paths(ui, repo, config, &mut ds_status.removed, b"R")?;
+ output.display(b"R ", "status.removed", ds_status.removed)?;
}
if display_states.deleted {
- display_status_paths(ui, repo, config, &mut ds_status.deleted, b"!")?;
+ output.display(b"! ", "status.deleted", ds_status.deleted)?;
}
if display_states.unknown {
- display_status_paths(ui, repo, config, &mut ds_status.unknown, b"?")?;
+ output.display(b"? ", "status.unknown", ds_status.unknown)?;
}
if display_states.ignored {
- display_status_paths(ui, repo, config, &mut ds_status.ignored, b"I")?;
+ output.display(b"I ", "status.ignored", ds_status.ignored)?;
}
if display_states.clean {
- display_status_paths(ui, repo, config, &mut ds_status.clean, b"C")?;
+ output.display(b"C ", "status.clean", ds_status.clean)?;
+ }
+
+ let mut dirstate_write_needed = ds_status.dirty;
+ let filesystem_time_at_status_start =
+ ds_status.filesystem_time_at_status_start;
+
+ if (fixup.is_empty() || filesystem_time_at_status_start.is_none())
+ && !dirstate_write_needed
+ {
+ // Nothing to update
+ return Ok(());
+ }
+
+ // Update the dirstate on disk if we can
+ let with_lock_result =
+ repo.try_with_wlock_no_wait(|| -> Result<(), CommandError> {
+ if let Some(mtime_boundary) = filesystem_time_at_status_start {
+ for hg_path in fixup {
+ use std::os::unix::fs::MetadataExt;
+ let fs_path = hg_path_to_path_buf(&hg_path)
+ .expect("HgPath conversion");
+ // Specifically do not reuse `fs_metadata` from
+ // `unsure_is_clean` which was needed before reading
+ // contents. Here we access metadata again after reading
+ // content, in case it changed in the meantime.
+ let fs_metadata = repo
+ .working_directory_vfs()
+ .symlink_metadata(&fs_path)?;
+ if let Some(mtime) =
+ TruncatedTimestamp::for_reliable_mtime_of(
+ &fs_metadata,
+ &mtime_boundary,
+ )
+ .when_reading_file(&fs_path)?
+ {
+ let mode = fs_metadata.mode();
+ let size = fs_metadata.len() as u32 & RANGE_MASK_31BIT;
+ let mut entry = dmap
+ .get(&hg_path)?
+ .expect("ambiguous file not in dirstate");
+ entry.set_clean(mode, size, mtime);
+ dmap.add_file(&hg_path, entry)?;
+ dirstate_write_needed = true
+ }
+ }
+ }
+ drop(dmap); // Avoid "already mutably borrowed" RefCell panics
+ if dirstate_write_needed {
+ repo.write_dirstate()?
+ }
+ Ok(())
+ });
+ match with_lock_result {
+ Ok(closure_result) => closure_result?,
+ Err(LockError::AlreadyHeld) => {
+ // Not updating the dirstate is not ideal but not critical:
+ // don’t keep our caller waiting until some other Mercurial
+ // process releases the lock.
+ }
+ Err(LockError::Other(HgError::IoError { error, .. }))
+ if error.kind() == io::ErrorKind::PermissionDenied =>
+ {
+ // `hg status` on a read-only repository is fine
+ }
+ Err(LockError::Other(error)) => {
+ // Report other I/O errors
+ Err(error)?
+ }
}
Ok(())
}
-// Probably more elegant to use a Deref or Borrow trait rather than
-// harcode HgPathBuf, but probably not really useful at this point
-fn display_status_paths(
- ui: &Ui,
- repo: &Repo,
- config: &Config,
- paths: &mut [HgPathCow],
- status_prefix: &[u8],
-) -> Result<(), CommandError> {
- paths.sort_unstable();
- let mut relative: bool =
- config.get_bool(b"ui", b"relative-paths").unwrap_or(false);
- relative = config
- .get_bool(b"commands", b"status.relative")
- .unwrap_or(relative);
- if relative && !ui.plain() {
- relativize_paths(
- repo,
- paths,
- |path: Cow<[u8]>| -> Result<(), UiError> {
- ui.write_stdout(
- &[status_prefix, b" ", path.as_ref(), b"\n"].concat(),
- )
- },
- )?;
- } else {
- for path in paths {
- // Same TODO as in commands::root
- let bytes: &[u8] = path.as_bytes();
- // TODO optim, probably lots of unneeded copies here, especially
- // if out stream is buffered
- ui.write_stdout(&[status_prefix, b" ", bytes, b"\n"].concat())?;
+fn ignore_files(repo: &Repo, config: &Config) -> Vec<PathBuf> {
+ let mut ignore_files = Vec::new();
+ let repo_ignore = repo.working_directory_vfs().join(".hgignore");
+ if repo_ignore.exists() {
+ ignore_files.push(repo_ignore)
+ }
+ for (key, value) in config.iter_section(b"ui") {
+ if key == b"ignore" || key.starts_with(b"ignore.") {
+ let path = get_path_from_bytes(value);
+ // TODO: expand "~/" and environment variable here, like Python
+ // does with `os.path.expanduser` and `os.path.expandvars`
+
+ let joined = repo.working_directory_path().join(path);
+ ignore_files.push(joined);
}
}
- Ok(())
+ ignore_files
+}
+
+struct DisplayStatusPaths<'a> {
+ ui: &'a Ui,
+ no_status: bool,
+ relativize: Option<RelativizePaths>,
+}
+
+impl DisplayStatusPaths<'_> {
+ // Probably more elegant to use a Deref or Borrow trait rather than
+    // hardcode HgPathBuf, but probably not really useful at this point
+ fn display(
+ &self,
+ status_prefix: &[u8],
+ label: &'static str,
+ mut paths: Vec<StatusPath<'_>>,
+ ) -> Result<(), CommandError> {
+ paths.sort_unstable();
+ // TODO: get the stdout lock once for the whole loop
+ // instead of in each write
+ for StatusPath { path, copy_source } in paths {
+ let relative;
+ let path = if let Some(relativize) = &self.relativize {
+ relative = relativize.relativize(&path);
+ &*relative
+ } else {
+ path.as_bytes()
+ };
+ // TODO: Add a way to use `write_bytes!` instead of `format_bytes!`
+ // in order to stream to stdout instead of allocating an
+            // intermediate `Vec<u8>`.
+ if !self.no_status {
+ self.ui.write_stdout_labelled(status_prefix, label)?
+ }
+ self.ui
+ .write_stdout_labelled(&format_bytes!(b"{}\n", path), label)?;
+ if let Some(source) = copy_source {
+ let label = "status.copied";
+ self.ui.write_stdout_labelled(
+ &format_bytes!(b" {}\n", source.as_bytes()),
+ label,
+ )?
+ }
+ }
+ Ok(())
+ }
}
/// Check if a file is modified by comparing actual repo store and file system.
///
/// This meant to be used for those that the dirstate cannot resolve, due
/// to time resolution limits.
-///
-/// TODO: detect permission bits and similar metadata modifications
-fn cat_file_is_modified(
+fn unsure_is_modified(
repo: &Repo,
manifest: &Manifest,
hg_path: &HgPath,
) -> Result<bool, HgError> {
- let file_node = manifest
- .find_file(hg_path)?
+ let vfs = repo.working_directory_vfs();
+ let fs_path = hg_path_to_path_buf(hg_path).expect("HgPath conversion");
+ let fs_metadata = vfs.symlink_metadata(&fs_path)?;
+ let is_symlink = fs_metadata.file_type().is_symlink();
+ // TODO: Also account for `FALLBACK_SYMLINK` and `FALLBACK_EXEC` from the
+ // dirstate
+ let fs_flags = if is_symlink {
+ Some(b'l')
+ } else if has_exec_bit(&fs_metadata) {
+ Some(b'x')
+ } else {
+ None
+ };
+
+ let entry = manifest
+ .find_by_path(hg_path)?
.expect("ambgious file not in p1");
+ if entry.flags != fs_flags {
+ return Ok(true);
+ }
let filelog = repo.filelog(hg_path)?;
- let filelog_entry = filelog.data_for_node(file_node).map_err(|_| {
- HgError::corrupted("filelog missing node from manifest")
- })?;
- let contents_in_p1 = filelog_entry.data()?;
+ let fs_len = fs_metadata.len();
+ let filelog_entry =
+ filelog.entry_for_node(entry.node_id()?).map_err(|_| {
+ HgError::corrupted("filelog missing node from manifest")
+ })?;
+ if filelog_entry.file_data_len_not_equal_to(fs_len) {
+ // No need to read file contents:
+ // it cannot be equal if it has a different length.
+ return Ok(true);
+ }
- let fs_path = hg_path_to_os_string(hg_path).expect("HgPath conversion");
- let fs_contents = repo.working_directory_vfs().read(fs_path)?;
- return Ok(contents_in_p1 != &*fs_contents);
+ let p1_filelog_data = filelog_entry.data()?;
+ let p1_contents = p1_filelog_data.file_data()?;
+ if p1_contents.len() as u64 != fs_len {
+ // No need to read file contents:
+ // it cannot be equal if it has a different length.
+ return Ok(true);
+ }
+
+ let fs_contents = if is_symlink {
+ get_bytes_from_os_string(vfs.read_link(fs_path)?.into_os_string())
+ } else {
+ vfs.read(fs_path)?
+ };
+ Ok(p1_contents != &*fs_contents)
}
--- a/rust/rhg/src/main.rs Fri Feb 18 12:55:39 2022 +0100
+++ b/rust/rhg/src/main.rs Fri Feb 18 14:27:43 2022 +0100
@@ -1,5 +1,6 @@
extern crate log;
-use crate::ui::Ui;
+use crate::error::CommandError;
+use crate::ui::{local_to_utf8, Ui};
use clap::App;
use clap::AppSettings;
use clap::Arg;
@@ -10,17 +11,18 @@
use hg::repo::{Repo, RepoError};
use hg::utils::files::{get_bytes_from_os_str, get_path_from_bytes};
use hg::utils::SliceExt;
+use std::collections::HashSet;
use std::ffi::OsString;
use std::path::PathBuf;
use std::process::Command;
mod blackbox;
+mod color;
mod error;
mod ui;
pub mod utils {
pub mod path_utils;
}
-use error::CommandError;
fn main_with_result(
process_start_time: &blackbox::ProcessStartTime,
@@ -28,7 +30,7 @@
repo: Result<&Repo, &NoRepoInCwdError>,
config: &Config,
) -> Result<(), CommandError> {
- check_extensions(config)?;
+ check_unsupported(config, repo)?;
let app = App::new("rhg")
.global_setting(AppSettings::AllowInvalidUtf8)
@@ -65,6 +67,14 @@
.takes_value(true)
.global(true),
)
+ .arg(
+ Arg::with_name("color")
+ .help("when to colorize (boolean, always, auto, never, or debug)")
+ .long("--color")
+ .value_name("TYPE")
+ .takes_value(true)
+ .global(true),
+ )
.version("0.0.1");
let app = add_subcommand_args(app);
@@ -110,18 +120,23 @@
}
}
- let blackbox = blackbox::Blackbox::new(&invocation, process_start_time)?;
- blackbox.log_command_start();
- let result = run(&invocation);
- blackbox.log_command_end(exit_code(
- &result,
- // TODO: show a warning or combine with original error if `get_bool`
- // returns an error
- config
- .get_bool(b"ui", b"detailed-exit-code")
- .unwrap_or(false),
- ));
- result
+ if config.is_extension_enabled(b"blackbox") {
+ let blackbox =
+ blackbox::Blackbox::new(&invocation, process_start_time)?;
+ blackbox.log_command_start();
+ let result = run(&invocation);
+ blackbox.log_command_end(exit_code(
+ &result,
+ // TODO: show a warning or combine with original error if
+ // `get_bool` returns an error
+ config
+ .get_bool(b"ui", b"detailed-exit-code")
+ .unwrap_or(false),
+ ));
+ result
+ } else {
+ run(&invocation)
+ }
}
fn main() {
@@ -131,7 +146,6 @@
let process_start_time = blackbox::ProcessStartTime::now();
env_logger::init();
- let ui = ui::Ui::new();
let early_args = EarlyArgs::parse(std::env::args_os());
@@ -145,7 +159,7 @@
.unwrap_or_else(|error| {
exit(
&None,
- &ui,
+ &Ui::new_infallible(&Config::empty()),
OnUnsupported::Abort,
Err(CommandError::abort(format!(
"abort: {}: '{}'",
@@ -166,7 +180,7 @@
exit(
&initial_current_dir,
- &ui,
+ &Ui::new_infallible(&Config::empty()),
on_unsupported,
Err(error.into()),
false,
@@ -174,12 +188,12 @@
});
non_repo_config
- .load_cli_args_config(early_args.config)
+ .load_cli_args(early_args.config, early_args.color)
.unwrap_or_else(|error| {
exit(
&initial_current_dir,
- &ui,
- OnUnsupported::from_config(&ui, &non_repo_config),
+ &Ui::new_infallible(&non_repo_config),
+ OnUnsupported::from_config(&non_repo_config),
Err(error.into()),
non_repo_config
.get_bool(b"ui", b"detailed-exit-code")
@@ -196,8 +210,8 @@
if SCHEME_RE.is_match(&repo_path_bytes) {
exit(
&initial_current_dir,
- &ui,
- OnUnsupported::from_config(&ui, &non_repo_config),
+ &Ui::new_infallible(&non_repo_config),
+ OnUnsupported::from_config(&non_repo_config),
Err(CommandError::UnsupportedFeature {
message: format_bytes!(
b"URL-like --repository {}",
@@ -286,8 +300,8 @@
}
Err(error) => exit(
&initial_current_dir,
- &ui,
- OnUnsupported::from_config(&ui, &non_repo_config),
+ &Ui::new_infallible(&non_repo_config),
+ OnUnsupported::from_config(&non_repo_config),
Err(error.into()),
// TODO: show a warning or combine with original error if
// `get_bool` returns an error
@@ -302,7 +316,18 @@
} else {
&non_repo_config
};
- let on_unsupported = OnUnsupported::from_config(&ui, config);
+ let ui = Ui::new(&config).unwrap_or_else(|error| {
+ exit(
+ &initial_current_dir,
+ &Ui::new_infallible(&config),
+ OnUnsupported::from_config(&config),
+ Err(error.into()),
+ config
+ .get_bool(b"ui", b"detailed-exit-code")
+ .unwrap_or(false),
+ )
+ });
+ let on_unsupported = OnUnsupported::from_config(config);
let result = main_with_result(
&process_start_time,
@@ -358,10 +383,24 @@
) -> ! {
if let (
OnUnsupported::Fallback { executable },
- Err(CommandError::UnsupportedFeature { .. }),
+ Err(CommandError::UnsupportedFeature { message }),
) = (&on_unsupported, &result)
{
let mut args = std::env::args_os();
+ let executable = match executable {
+ None => {
+ exit_no_fallback(
+ ui,
+ OnUnsupported::Abort,
+ Err(CommandError::abort(
+ "abort: 'rhg.on-unsupported=fallback' without \
+ 'rhg.fallback-executable' set.",
+ )),
+ false,
+ );
+ }
+ Some(executable) => executable,
+ };
let executable_path = get_path_from_bytes(&executable);
let this_executable = args.next().expect("exepcted argv[0] to exist");
if executable_path == &PathBuf::from(this_executable) {
@@ -374,7 +413,10 @@
));
on_unsupported = OnUnsupported::Abort
} else {
- // `args` is now `argv[1..]` since we’ve already consumed `argv[0]`
+ log::debug!("falling back (see trace-level log)");
+ log::trace!("{}", local_to_utf8(message));
+ // `args` is now `argv[1..]` since we’ve already consumed
+ // `argv[0]`
let mut command = Command::new(executable_path);
command.args(args);
if let Some(initial) = initial_current_dir {
@@ -465,6 +507,7 @@
cat
debugdata
debugrequirements
+ debugignorerhg
files
root
config
@@ -494,6 +537,8 @@
struct EarlyArgs {
/// Values of all `--config` arguments. (Possibly none)
config: Vec<Vec<u8>>,
+    /// Value of the `--color` argument, if any.
+ color: Option<Vec<u8>>,
/// Value of the `-R` or `--repository` argument, if any.
repo: Option<Vec<u8>>,
/// Value of the `--cwd` argument, if any.
@@ -504,6 +549,7 @@
fn parse(args: impl IntoIterator<Item = OsString>) -> Self {
let mut args = args.into_iter().map(get_bytes_from_os_str);
let mut config = Vec::new();
+ let mut color = None;
let mut repo = None;
let mut cwd = None;
// Use `while let` instead of `for` so that we can also call
@@ -517,6 +563,14 @@
config.push(value.to_owned())
}
+ if arg == b"--color" {
+ if let Some(value) = args.next() {
+ color = Some(value)
+ }
+ } else if let Some(value) = arg.drop_prefix(b"--color=") {
+ color = Some(value.to_owned())
+ }
+
if arg == b"--cwd" {
if let Some(value) = args.next() {
cwd = Some(value)
@@ -535,7 +589,12 @@
repo = Some(value.to_owned())
}
}
- Self { config, repo, cwd }
+ Self {
+ config,
+ color,
+ repo,
+ cwd,
+ }
}
}
@@ -549,13 +608,13 @@
/// Silently exit with code 252.
AbortSilent,
/// Try running a Python implementation
- Fallback { executable: Vec<u8> },
+ Fallback { executable: Option<Vec<u8>> },
}
impl OnUnsupported {
const DEFAULT: Self = OnUnsupported::Abort;
- fn from_config(ui: &Ui, config: &Config) -> Self {
+ fn from_config(config: &Config) -> Self {
match config
.get(b"rhg", b"on-unsupported")
.map(|value| value.to_ascii_lowercase())
@@ -566,18 +625,7 @@
Some(b"fallback") => OnUnsupported::Fallback {
executable: config
.get(b"rhg", b"fallback-executable")
- .unwrap_or_else(|| {
- exit_no_fallback(
- ui,
- Self::Abort,
- Err(CommandError::abort(
- "abort: 'rhg.on-unsupported=fallback' without \
- 'rhg.fallback-executable' set."
- )),
- false,
- )
- })
- .to_owned(),
+ .map(|x| x.to_owned()),
},
None => Self::DEFAULT,
Some(_) => {
@@ -588,10 +636,23 @@
}
}
-const SUPPORTED_EXTENSIONS: &[&[u8]] = &[b"blackbox", b"share"];
+/// The `*` extension is an edge-case for config sub-options that apply to all
+/// extensions. For now, only `:required` exists, but that may change in the
+/// future.
+const SUPPORTED_EXTENSIONS: &[&[u8]] =
+ &[b"blackbox", b"share", b"sparse", b"narrow", b"*"];
fn check_extensions(config: &Config) -> Result<(), CommandError> {
- let enabled = config.get_section_keys(b"extensions");
+ let enabled: HashSet<&[u8]> = config
+ .get_section_keys(b"extensions")
+ .into_iter()
+ .map(|extension| {
+ // Ignore extension suboptions. Only `required` exists for now.
+ // `rhg` either supports an extension or doesn't, so it doesn't
+ // make sense to consider the loading of an extension.
+ extension.split_2(b':').unwrap_or((extension, b"")).0
+ })
+ .collect();
let mut unsupported = enabled;
for supported in SUPPORTED_EXTENSIONS {
@@ -616,3 +677,32 @@
})
}
}
+
+fn check_unsupported(
+ config: &Config,
+ repo: Result<&Repo, &NoRepoInCwdError>,
+) -> Result<(), CommandError> {
+ check_extensions(config)?;
+
+ if std::env::var_os("HG_PENDING").is_some() {
+ // TODO: only if the value is `== repo.working_directory`?
+        // What about relative vs. absolute paths?
+ Err(CommandError::unsupported("$HG_PENDING"))?
+ }
+
+ if let Ok(repo) = repo {
+ if repo.has_subrepos()? {
+ Err(CommandError::unsupported("sub-repositories"))?
+ }
+ }
+
+ if config.has_non_empty_section(b"encode") {
+ Err(CommandError::unsupported("[encode] config"))?
+ }
+
+ if config.has_non_empty_section(b"decode") {
+ Err(CommandError::unsupported("[decode] config"))?
+ }
+
+ Ok(())
+}
--- a/rust/rhg/src/ui.rs Fri Feb 18 12:55:39 2022 +0100
+++ b/rust/rhg/src/ui.rs Fri Feb 18 14:27:43 2022 +0100
@@ -1,13 +1,19 @@
+use crate::color::ColorConfig;
+use crate::color::Effect;
use format_bytes::format_bytes;
+use format_bytes::write_bytes;
+use hg::config::Config;
+use hg::errors::HgError;
+use hg::utils::files::get_bytes_from_os_string;
use std::borrow::Cow;
use std::env;
use std::io;
use std::io::{ErrorKind, Write};
-#[derive(Debug)]
pub struct Ui {
stdout: std::io::Stdout,
stderr: std::io::Stderr,
+ colors: Option<ColorConfig>,
}
/// The kind of user interface error
@@ -20,10 +26,26 @@
/// The commandline user interface
impl Ui {
- pub fn new() -> Self {
+ pub fn new(config: &Config) -> Result<Self, HgError> {
+ Ok(Ui {
+ // If using something else, also adapt `isatty()` below.
+ stdout: std::io::stdout(),
+
+ stderr: std::io::stderr(),
+ colors: ColorConfig::new(config)?,
+ })
+ }
+
+ /// Default to no color if color configuration errors.
+ ///
+ /// Useful when we’re already handling another error.
+ pub fn new_infallible(config: &Config) -> Self {
Ui {
+ // If using something else, also adapt `isatty()` below.
stdout: std::io::stdout(),
+
stderr: std::io::stderr(),
+ colors: ColorConfig::new(config).unwrap_or(None),
}
}
@@ -51,7 +73,62 @@
stderr.flush().or_else(handle_stderr_error)
}
- /// is plain mode active
+ /// Write bytes to stdout with the given label
+ ///
+ /// Like the optional `label` parameter in `mercurial/ui.py`,
+ /// this label influences the color used for this output.
+ pub fn write_stdout_labelled(
+ &self,
+ bytes: &[u8],
+ label: &str,
+ ) -> Result<(), UiError> {
+ if let Some(colors) = &self.colors {
+ if let Some(effects) = colors.styles.get(label.as_bytes()) {
+ if !effects.is_empty() {
+ return self
+ .write_stdout_with_effects(bytes, effects)
+ .or_else(handle_stdout_error);
+ }
+ }
+ }
+ self.write_stdout(bytes)
+ }
+
+ fn write_stdout_with_effects(
+ &self,
+ bytes: &[u8],
+ effects: &[Effect],
+ ) -> io::Result<()> {
+ let stdout = &mut self.stdout.lock();
+ let mut write_line = |line: &[u8], first: bool| {
+ // `line` does not include the newline delimiter
+ if !first {
+ stdout.write_all(b"\n")?;
+ }
+ if line.is_empty() {
+ return Ok(());
+ }
+ /// 0x1B == 27 == 0o33
+ const ASCII_ESCAPE: &[u8] = b"\x1b";
+ write_bytes!(stdout, b"{}[0", ASCII_ESCAPE)?;
+ for effect in effects {
+ write_bytes!(stdout, b";{}", effect)?;
+ }
+ write_bytes!(stdout, b"m")?;
+ stdout.write_all(line)?;
+ write_bytes!(stdout, b"{}[0m", ASCII_ESCAPE)
+ };
+ let mut lines = bytes.split(|&byte| byte == b'\n');
+ if let Some(first) = lines.next() {
+ write_line(first, true)?;
+ for line in lines {
+ write_line(line, false)?
+ }
+ }
+ stdout.flush()
+ }
+
+ /// Return whether plain mode is active.
///
/// Plain mode means that all configuration variables which affect
/// the behavior and output of Mercurial should be
@@ -65,8 +142,19 @@
/// - False if HGPLAIN is not set, or feature is in HGPLAINEXCEPT
/// - False if feature is disabled by default and not included in HGPLAIN
/// - True otherwise
- pub fn plain(&self) -> bool {
- // TODO: add support for HGPLAINEXCEPT
+ pub fn plain(&self, feature: Option<&str>) -> bool {
+ plain(feature)
+ }
+}
+
+pub fn plain(opt_feature: Option<&str>) -> bool {
+ if let Some(except) = env::var_os("HGPLAINEXCEPT") {
+ opt_feature.map_or(true, |feature| {
+ get_bytes_from_os_string(except)
+ .split(|&byte| byte == b',')
+ .all(|exception| exception != feature.as_bytes())
+ })
+ } else {
env::var_os("HGPLAIN").is_some()
}
}
@@ -130,3 +218,29 @@
let bytes = s.as_bytes();
Cow::Borrowed(bytes)
}
+
+/// Decode user system bytes to Rust string.
+pub fn local_to_utf8(s: &[u8]) -> Cow<str> {
+ // TODO decode from the user's system
+ String::from_utf8_lossy(s)
+}
+
+/// Should formatted output be used?
+///
+/// Note: rhg does not have the formatter mechanism yet,
+/// but this is also used when deciding whether to use color.
+pub fn formatted(config: &Config) -> Result<bool, HgError> {
+ if let Some(formatted) = config.get_option(b"ui", b"formatted")? {
+ Ok(formatted)
+ } else {
+ isatty(config)
+ }
+}
+
+fn isatty(config: &Config) -> Result<bool, HgError> {
+ Ok(if config.get_bool(b"ui", b"nontty")? {
+ false
+ } else {
+ atty::is(atty::Stream::Stdout)
+ })
+}
--- a/rust/rhg/src/utils/path_utils.rs Fri Feb 18 12:55:39 2022 +0100
+++ b/rust/rhg/src/utils/path_utils.rs Fri Feb 18 14:27:43 2022 +0100
@@ -3,8 +3,7 @@
// This software may be used and distributed according to the terms of the
// GNU General Public License version 2 or any later version.
-use crate::error::CommandError;
-use crate::ui::UiError;
+use hg::errors::HgError;
use hg::repo::Repo;
use hg::utils::current_dir;
use hg::utils::files::{get_bytes_from_path, relativize_path};
@@ -12,37 +11,45 @@
use hg::utils::hg_path::HgPathBuf;
use std::borrow::Cow;
-pub fn relativize_paths(
- repo: &Repo,
- paths: impl IntoIterator<Item = impl AsRef<HgPath>>,
- mut callback: impl FnMut(Cow<[u8]>) -> Result<(), UiError>,
-) -> Result<(), CommandError> {
- let cwd = current_dir()?;
- let repo_root = repo.working_directory_path();
- let repo_root = cwd.join(repo_root); // Make it absolute
- let repo_root_hgpath =
- HgPathBuf::from(get_bytes_from_path(repo_root.to_owned()));
- let outside_repo: bool;
- let cwd_hgpath: HgPathBuf;
+pub struct RelativizePaths {
+ repo_root: HgPathBuf,
+ cwd: HgPathBuf,
+ outside_repo: bool,
+}
+
+impl RelativizePaths {
+ pub fn new(repo: &Repo) -> Result<Self, HgError> {
+ let cwd = current_dir()?;
+ let repo_root = repo.working_directory_path();
+ let repo_root = cwd.join(repo_root); // Make it absolute
+ let repo_root_hgpath =
+ HgPathBuf::from(get_bytes_from_path(repo_root.to_owned()));
- if let Ok(cwd_relative_to_repo) = cwd.strip_prefix(&repo_root) {
- // The current directory is inside the repo, so we can work with
- // relative paths
- outside_repo = false;
- cwd_hgpath =
- HgPathBuf::from(get_bytes_from_path(cwd_relative_to_repo));
- } else {
- outside_repo = true;
- cwd_hgpath = HgPathBuf::from(get_bytes_from_path(cwd));
+ if let Ok(cwd_relative_to_repo) = cwd.strip_prefix(&repo_root) {
+ // The current directory is inside the repo, so we can work with
+ // relative paths
+ Ok(Self {
+ repo_root: repo_root_hgpath,
+ cwd: HgPathBuf::from(get_bytes_from_path(
+ cwd_relative_to_repo,
+ )),
+ outside_repo: false,
+ })
+ } else {
+ Ok(Self {
+ repo_root: repo_root_hgpath,
+ cwd: HgPathBuf::from(get_bytes_from_path(cwd)),
+ outside_repo: true,
+ })
+ }
}
- for file in paths {
- if outside_repo {
- let file = repo_root_hgpath.join(file.as_ref());
- callback(relativize_path(&file, &cwd_hgpath))?;
+ pub fn relativize<'a>(&self, path: &'a HgPath) -> Cow<'a, [u8]> {
+ if self.outside_repo {
+ let joined = self.repo_root.join(path);
+ Cow::Owned(relativize_path(&joined, &self.cwd).into_owned())
} else {
- callback(relativize_path(file.as_ref(), &cwd_hgpath))?;
+ relativize_path(path, &self.cwd)
}
}
- Ok(())
}
--- a/setup.py Fri Feb 18 12:55:39 2022 +0100
+++ b/setup.py Fri Feb 18 14:27:43 2022 +0100
@@ -209,7 +209,7 @@
from distutils.sysconfig import get_python_inc, get_config_var
from distutils.version import StrictVersion
-# Explain to distutils.StrictVersion how our release candidates are versionned
+# Explain to distutils.StrictVersion how our release candidates are versioned
StrictVersion.version_re = re.compile(r'^(\d+)\.(\d+)(\.(\d+))?-?(rc(\d+))?$')
@@ -535,7 +535,7 @@
# (see mercurial/__modulepolicy__.py)
if hgrustext != 'cpython' and hgrustext is not None:
if hgrustext:
- msg = 'unkown HGWITHRUSTEXT value: %s' % hgrustext
+ msg = 'unknown HGWITHRUSTEXT value: %s' % hgrustext
printf(msg, file=sys.stderr)
hgrustext = None
self.rust = hgrustext is not None
@@ -597,8 +597,8 @@
e for e in self.extensions if e.name != 'mercurial.zstd'
]
- # Build Rust standalon extensions if it'll be used
- # and its build is not explictely disabled (for external build
+ # Build Rust standalone extensions if it'll be used
+ # and its build is not explicitly disabled (for external build
# as Linux distributions would do)
if self.distribution.rust and self.rust:
if not sys.platform.startswith('linux'):
@@ -1502,7 +1502,7 @@
raise RustCompilationError("Cargo not found")
elif exc.errno == errno.EACCES:
raise RustCompilationError(
- "Cargo found, but permisssion to execute it is denied"
+ "Cargo found, but permission to execute it is denied"
)
else:
raise
--- a/tests/badserverext.py Fri Feb 18 12:55:39 2022 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,384 +0,0 @@
-# badserverext.py - Extension making servers behave badly
-#
-# Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-# no-check-code
-
-"""Extension to make servers behave badly.
-
-This extension is useful for testing Mercurial behavior when various network
-events occur.
-
-Various config options in the [badserver] section influence behavior:
-
-closebeforeaccept
- If true, close() the server socket when a new connection arrives before
- accept() is called. The server will then exit.
-
-closeafteraccept
- If true, the server will close() the client socket immediately after
- accept().
-
-closeafterrecvbytes
- If defined, close the client socket after receiving this many bytes.
-
-closeaftersendbytes
- If defined, close the client socket after sending this many bytes.
-"""
-
-from __future__ import absolute_import
-
-import socket
-
-from mercurial import (
- pycompat,
- registrar,
-)
-
-from mercurial.hgweb import server
-
-configtable = {}
-configitem = registrar.configitem(configtable)
-
-configitem(
- b'badserver',
- b'closeafteraccept',
- default=False,
-)
-configitem(
- b'badserver',
- b'closeafterrecvbytes',
- default=b'0',
-)
-configitem(
- b'badserver',
- b'closeaftersendbytes',
- default=b'0',
-)
-configitem(
- b'badserver',
- b'closebeforeaccept',
- default=False,
-)
-
-# We can't adjust __class__ on a socket instance. So we define a proxy type.
-class socketproxy(object):
- __slots__ = (
- '_orig',
- '_logfp',
- '_closeafterrecvbytes',
- '_closeaftersendbytes',
- )
-
- def __init__(
- self, obj, logfp, closeafterrecvbytes=0, closeaftersendbytes=0
- ):
- object.__setattr__(self, '_orig', obj)
- object.__setattr__(self, '_logfp', logfp)
- object.__setattr__(self, '_closeafterrecvbytes', closeafterrecvbytes)
- object.__setattr__(self, '_closeaftersendbytes', closeaftersendbytes)
-
- def __getattribute__(self, name):
- if name in ('makefile', 'sendall', '_writelog'):
- return object.__getattribute__(self, name)
-
- return getattr(object.__getattribute__(self, '_orig'), name)
-
- def __delattr__(self, name):
- delattr(object.__getattribute__(self, '_orig'), name)
-
- def __setattr__(self, name, value):
- setattr(object.__getattribute__(self, '_orig'), name, value)
-
- def _writelog(self, msg):
- msg = msg.replace(b'\r', b'\\r').replace(b'\n', b'\\n')
-
- object.__getattribute__(self, '_logfp').write(msg)
- object.__getattribute__(self, '_logfp').write(b'\n')
- object.__getattribute__(self, '_logfp').flush()
-
- def makefile(self, mode, bufsize):
- f = object.__getattribute__(self, '_orig').makefile(mode, bufsize)
-
- logfp = object.__getattribute__(self, '_logfp')
- closeafterrecvbytes = object.__getattribute__(
- self, '_closeafterrecvbytes'
- )
- closeaftersendbytes = object.__getattribute__(
- self, '_closeaftersendbytes'
- )
-
- return fileobjectproxy(
- f,
- logfp,
- closeafterrecvbytes=closeafterrecvbytes,
- closeaftersendbytes=closeaftersendbytes,
- )
-
- def sendall(self, data, flags=0):
- remaining = object.__getattribute__(self, '_closeaftersendbytes')
-
- # No read limit. Call original function.
- if not remaining:
- result = object.__getattribute__(self, '_orig').sendall(data, flags)
- self._writelog(b'sendall(%d) -> %s' % (len(data), data))
- return result
-
- if len(data) > remaining:
- newdata = data[0:remaining]
- else:
- newdata = data
-
- remaining -= len(newdata)
-
- result = object.__getattribute__(self, '_orig').sendall(newdata, flags)
-
- self._writelog(
- b'sendall(%d from %d) -> (%d) %s'
- % (len(newdata), len(data), remaining, newdata)
- )
-
- object.__setattr__(self, '_closeaftersendbytes', remaining)
-
- if remaining <= 0:
- self._writelog(b'write limit reached; closing socket')
- object.__getattribute__(self, '_orig').shutdown(socket.SHUT_RDWR)
-
- raise Exception('connection closed after sending N bytes')
-
- return result
-
-
-# We can't adjust __class__ on socket._fileobject, so define a proxy.
-class fileobjectproxy(object):
- __slots__ = (
- '_orig',
- '_logfp',
- '_closeafterrecvbytes',
- '_closeaftersendbytes',
- )
-
- def __init__(
- self, obj, logfp, closeafterrecvbytes=0, closeaftersendbytes=0
- ):
- object.__setattr__(self, '_orig', obj)
- object.__setattr__(self, '_logfp', logfp)
- object.__setattr__(self, '_closeafterrecvbytes', closeafterrecvbytes)
- object.__setattr__(self, '_closeaftersendbytes', closeaftersendbytes)
-
- def __getattribute__(self, name):
- if name in ('_close', 'read', 'readline', 'write', '_writelog'):
- return object.__getattribute__(self, name)
-
- return getattr(object.__getattribute__(self, '_orig'), name)
-
- def __delattr__(self, name):
- delattr(object.__getattribute__(self, '_orig'), name)
-
- def __setattr__(self, name, value):
- setattr(object.__getattribute__(self, '_orig'), name, value)
-
- def _writelog(self, msg):
- msg = msg.replace(b'\r', b'\\r').replace(b'\n', b'\\n')
-
- object.__getattribute__(self, '_logfp').write(msg)
- object.__getattribute__(self, '_logfp').write(b'\n')
- object.__getattribute__(self, '_logfp').flush()
-
- def _close(self):
- # Python 3 uses an io.BufferedIO instance. Python 2 uses some file
- # object wrapper.
- if pycompat.ispy3:
- orig = object.__getattribute__(self, '_orig')
-
- if hasattr(orig, 'raw'):
- orig.raw._sock.shutdown(socket.SHUT_RDWR)
- else:
- self.close()
- else:
- self._sock.shutdown(socket.SHUT_RDWR)
-
- def read(self, size=-1):
- remaining = object.__getattribute__(self, '_closeafterrecvbytes')
-
- # No read limit. Call original function.
- if not remaining:
- result = object.__getattribute__(self, '_orig').read(size)
- self._writelog(
- b'read(%d) -> (%d) (%s) %s' % (size, len(result), result)
- )
- return result
-
- origsize = size
-
- if size < 0:
- size = remaining
- else:
- size = min(remaining, size)
-
- result = object.__getattribute__(self, '_orig').read(size)
- remaining -= len(result)
-
- self._writelog(
- b'read(%d from %d) -> (%d) %s'
- % (size, origsize, len(result), result)
- )
-
- object.__setattr__(self, '_closeafterrecvbytes', remaining)
-
- if remaining <= 0:
- self._writelog(b'read limit reached, closing socket')
- self._close()
-
- # This is the easiest way to abort the current request.
- raise Exception('connection closed after receiving N bytes')
-
- return result
-
- def readline(self, size=-1):
- remaining = object.__getattribute__(self, '_closeafterrecvbytes')
-
- # No read limit. Call original function.
- if not remaining:
- result = object.__getattribute__(self, '_orig').readline(size)
- self._writelog(
- b'readline(%d) -> (%d) %s' % (size, len(result), result)
- )
- return result
-
- origsize = size
-
- if size < 0:
- size = remaining
- else:
- size = min(remaining, size)
-
- result = object.__getattribute__(self, '_orig').readline(size)
- remaining -= len(result)
-
- self._writelog(
- b'readline(%d from %d) -> (%d) %s'
- % (size, origsize, len(result), result)
- )
-
- object.__setattr__(self, '_closeafterrecvbytes', remaining)
-
- if remaining <= 0:
- self._writelog(b'read limit reached; closing socket')
- self._close()
-
- # This is the easiest way to abort the current request.
- raise Exception('connection closed after receiving N bytes')
-
- return result
-
- def write(self, data):
- remaining = object.__getattribute__(self, '_closeaftersendbytes')
-
- # No byte limit on this operation. Call original function.
- if not remaining:
- self._writelog(b'write(%d) -> %s' % (len(data), data))
- result = object.__getattribute__(self, '_orig').write(data)
- return result
-
- if len(data) > remaining:
- newdata = data[0:remaining]
- else:
- newdata = data
-
- remaining -= len(newdata)
-
- self._writelog(
- b'write(%d from %d) -> (%d) %s'
- % (len(newdata), len(data), remaining, newdata)
- )
-
- result = object.__getattribute__(self, '_orig').write(newdata)
-
- object.__setattr__(self, '_closeaftersendbytes', remaining)
-
- if remaining <= 0:
- self._writelog(b'write limit reached; closing socket')
- self._close()
-
- raise Exception('connection closed after sending N bytes')
-
- return result
-
-
-def extsetup(ui):
- # Change the base HTTP server class so various events can be performed.
- # See SocketServer.BaseServer for how the specially named methods work.
- class badserver(server.MercurialHTTPServer):
- def __init__(self, ui, *args, **kwargs):
- self._ui = ui
- super(badserver, self).__init__(ui, *args, **kwargs)
-
- recvbytes = self._ui.config(b'badserver', b'closeafterrecvbytes')
- recvbytes = recvbytes.split(b',')
- self.closeafterrecvbytes = [int(v) for v in recvbytes if v]
- sendbytes = self._ui.config(b'badserver', b'closeaftersendbytes')
- sendbytes = sendbytes.split(b',')
- self.closeaftersendbytes = [int(v) for v in sendbytes if v]
-
- # Need to inherit object so super() works.
- class badrequesthandler(self.RequestHandlerClass, object):
- def send_header(self, name, value):
- # Make headers deterministic to facilitate testing.
- if name.lower() == 'date':
- value = 'Fri, 14 Apr 2017 00:00:00 GMT'
- elif name.lower() == 'server':
- value = 'badhttpserver'
-
- return super(badrequesthandler, self).send_header(
- name, value
- )
-
- self.RequestHandlerClass = badrequesthandler
-
- # Called to accept() a pending socket.
- def get_request(self):
- if self._ui.configbool(b'badserver', b'closebeforeaccept'):
- self.socket.close()
-
- # Tells the server to stop processing more requests.
- self.__shutdown_request = True
-
- # Simulate failure to stop processing this request.
- raise socket.error('close before accept')
-
- if self._ui.configbool(b'badserver', b'closeafteraccept'):
- request, client_address = super(badserver, self).get_request()
- request.close()
- raise socket.error('close after accept')
-
- return super(badserver, self).get_request()
-
- # Does heavy lifting of processing a request. Invokes
- # self.finish_request() which calls self.RequestHandlerClass() which
- # is a hgweb.server._httprequesthandler.
- def process_request(self, socket, address):
- # Wrap socket in a proxy if we need to count bytes.
- if self.closeafterrecvbytes:
- closeafterrecvbytes = self.closeafterrecvbytes.pop(0)
- else:
- closeafterrecvbytes = 0
- if self.closeaftersendbytes:
- closeaftersendbytes = self.closeaftersendbytes.pop(0)
- else:
- closeaftersendbytes = 0
-
- if closeafterrecvbytes or closeaftersendbytes:
- socket = socketproxy(
- socket,
- self.errorlog,
- closeafterrecvbytes=closeafterrecvbytes,
- closeaftersendbytes=closeaftersendbytes,
- )
-
- return super(badserver, self).process_request(socket, address)
-
- server.MercurialHTTPServer = badserver
--- a/tests/failfilemerge.py Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/failfilemerge.py Fri Feb 18 14:27:43 2022 +0100
@@ -9,12 +9,9 @@
)
-def failfilemerge(
- filemergefn, premerge, repo, wctx, mynode, orig, fcd, fco, fca, labels=None
-):
+def failfilemerge(*args, **kwargs):
raise error.Abort(b"^C")
- return filemergefn(premerge, repo, mynode, orig, fcd, fco, fca, labels)
def extsetup(ui):
- extensions.wrapfunction(filemerge, '_filemerge', failfilemerge)
+ extensions.wrapfunction(filemerge, 'filemerge', failfilemerge)
--- a/tests/fakedirstatewritetime.py Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/fakedirstatewritetime.py Fri Feb 18 14:27:43 2022 +0100
@@ -9,7 +9,6 @@
from mercurial import (
context,
- dirstate,
dirstatemap as dirstatemapmod,
extensions,
policy,
@@ -38,14 +37,8 @@
has_rust_dirstate = policy.importrust('dirstate') is not None
-def pack_dirstate(fakenow, orig, dmap, copymap, pl, now):
- # execute what original parsers.pack_dirstate should do actually
- # for consistency
- for f, e in dmap.items():
- if e.need_delay(now):
- e.set_possibly_dirty()
-
- return orig(dmap, copymap, pl, fakenow)
+def pack_dirstate(orig, dmap, copymap, pl):
+ return orig(dmap, copymap, pl)
def fakewrite(ui, func):
@@ -62,30 +55,30 @@
# parsing 'fakenow' in YYYYmmddHHMM format makes comparison between
# 'fakenow' value and 'touch -t YYYYmmddHHMM' argument easy
fakenow = dateutil.parsedate(fakenow, [b'%Y%m%d%H%M'])[0]
- fakenow = timestamp.timestamp((fakenow, 0))
+ fakenow = timestamp.timestamp((fakenow, 0, False))
if has_rust_dirstate:
# The Rust implementation does not use public parse/pack dirstate
# to prevent conversion round-trips
orig_dirstatemap_write = dirstatemapmod.dirstatemap.write
- wrapper = lambda self, tr, st, now: orig_dirstatemap_write(
- self, tr, st, fakenow
- )
+ wrapper = lambda self, tr, st: orig_dirstatemap_write(self, tr, st)
dirstatemapmod.dirstatemap.write = wrapper
- orig_dirstate_getfsnow = dirstate._getfsnow
- wrapper = lambda *args: pack_dirstate(fakenow, orig_pack_dirstate, *args)
+ orig_get_fs_now = timestamp.get_fs_now
+ wrapper = lambda *args: pack_dirstate(orig_pack_dirstate, *args)
orig_module = parsers
orig_pack_dirstate = parsers.pack_dirstate
orig_module.pack_dirstate = wrapper
- dirstate._getfsnow = lambda *args: fakenow
+ timestamp.get_fs_now = (
+ lambda *args: fakenow
+ ) # XXX useless for this purpose now
try:
return func()
finally:
orig_module.pack_dirstate = orig_pack_dirstate
- dirstate._getfsnow = orig_dirstate_getfsnow
+ timestamp.get_fs_now = orig_get_fs_now
if has_rust_dirstate:
dirstatemapmod.dirstatemap.write = orig_dirstatemap_write
--- a/tests/hghave.py Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/hghave.py Fri Feb 18 14:27:43 2022 +0100
@@ -663,6 +663,22 @@
return (major, minor) >= (2, 5)
+@check("pygments211", "Pygments version >= 2.11")
+def pygments211():
+ try:
+ import pygments
+
+ v = pygments.__version__
+ except ImportError:
+ return False
+
+ parts = v.split(".")
+ major = int(parts[0])
+ minor = int(parts[1])
+
+ return (major, minor) >= (2, 11)
+
+
@check("outer-repo", "outer repo")
def has_outer_repo():
# failing for other reasons than 'no repo' imply that there is a repo
--- a/tests/test-annotate.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-annotate.t Fri Feb 18 14:27:43 2022 +0100
@@ -221,16 +221,16 @@
a
a
a
- <<<<<<< working copy: 5fbdc1152d97 - test: b2.1
+ <<<<<<< working copy: 5fbdc1152d97 - test: b2.1
b4
c
b5
- ||||||| base
+ ||||||| common ancestor: 3086dbafde1c - test: b
=======
b4
b5
b6
- >>>>>>> merge rev: 37ec9f5c3d1f - test: b2
+ >>>>>>> merge rev: 37ec9f5c3d1f - test: b2
$ cat <<EOF > b
> a
> a
@@ -747,16 +747,16 @@
0
1 baz:1
2 baz:2
- <<<<<<< working copy: 863de62655ef - test: baz:3+->3-
+ <<<<<<< working copy: 863de62655ef - test: baz:3+->3-
3- baz:3
4 baz:4
- ||||||| base
+ ||||||| common ancestor: 56fc739c091f - test: baz:3->3+
3+ baz:3
4 baz:4
=======
3+ baz:3
4+ baz:4
- >>>>>>> merge rev: cb8df70ae185 - test: qux:4->4+
+ >>>>>>> merge rev: cb8df70ae185 - test: qux:4->4+
5
6
7
@@ -794,16 +794,16 @@
0
1 baz:1
2 baz:2
- <<<<<<< working copy: cb8df70ae185 - test: qux:4->4+
+ <<<<<<< working copy: cb8df70ae185 - test: qux:4->4+
3+ baz:3
4+ baz:4
- ||||||| base
+ ||||||| common ancestor: 56fc739c091f - test: baz:3->3+
3+ baz:3
4 baz:4
=======
3- baz:3
4 baz:4
- >>>>>>> merge rev: 863de62655ef - test: baz:3+->3-
+ >>>>>>> merge rev: 863de62655ef - test: baz:3+->3-
5
6
7
@@ -886,7 +886,7 @@
created new head
$ hg merge --tool :merge-other 24
merging baz
- 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)
$ hg ci -m 'merge forgetting about baz rewrite'
$ cat > baz << EOF
@@ -1172,16 +1172,16 @@
warning: conflicts while merging a! (edit, then use 'hg resolve --mark')
[1]
$ cat a
- <<<<<<< working copy: 0a068f0261cf - test: 3
+ <<<<<<< working copy: 0a068f0261cf - test: 3
1
2
3
- ||||||| base
+ ||||||| common ancestor: 1ed24be7e7a0 - test: 2
1
2
=======
a
- >>>>>>> merge rev: 9409851bc20a - test: a
+ >>>>>>> merge rev: 9409851bc20a - test: a
$ cat > a << EOF
> b
> 1
--- a/tests/test-audit-path.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-audit-path.t Fri Feb 18 14:27:43 2022 +0100
@@ -8,7 +8,7 @@
$ hg add .hg/00changelog.i
abort: path contains illegal component: .hg/00changelog.i
- [255]
+ [10]
#if symlink
@@ -91,7 +91,7 @@
.hg/test
$ hg update -Cr0
abort: path contains illegal component: .hg/test
- [255]
+ [10]
attack foo/.hg/test
@@ -99,7 +99,7 @@
foo/.hg/test
$ hg update -Cr1
abort: path 'foo/.hg/test' is inside nested repo 'foo'
- [255]
+ [10]
attack back/test where back symlinks to ..
@@ -125,7 +125,7 @@
$ echo data > ../test/file
$ hg update -Cr3
abort: path contains illegal component: ../test
- [255]
+ [10]
$ cat ../test/file
data
@@ -135,7 +135,7 @@
/tmp/test
$ hg update -Cr4
abort: path contains illegal component: /tmp/test
- [255]
+ [10]
$ cd ..
--- a/tests/test-audit-subrepo.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-audit-subrepo.t Fri Feb 18 14:27:43 2022 +0100
@@ -10,7 +10,7 @@
$ echo 'sub/.hg = sub/.hg' >> .hgsub
$ hg ci -qAm 'add subrepo "sub/.hg"'
abort: path 'sub/.hg' is inside nested repo 'sub'
- [255]
+ [10]
prepare tampered repo (including the commit above):
@@ -34,7 +34,7 @@
$ hg clone -q hgname hgname2
abort: path 'sub/.hg' is inside nested repo 'sub'
- [255]
+ [10]
Test absolute path
------------------
@@ -47,7 +47,7 @@
$ echo '/sub = sub' >> .hgsub
$ hg ci -qAm 'add subrepo "/sub"'
abort: path contains illegal component: /sub
- [255]
+ [10]
prepare tampered repo (including the commit above):
@@ -71,7 +71,7 @@
$ hg clone -q absolutepath absolutepath2
abort: path contains illegal component: /sub
- [255]
+ [10]
Test root path
--------------
@@ -84,7 +84,7 @@
$ echo '/ = sub' >> .hgsub
$ hg ci -qAm 'add subrepo "/"'
abort: path ends in directory separator: /
- [255]
+ [10]
prepare tampered repo (including the commit above):
@@ -108,7 +108,7 @@
$ hg clone -q rootpath rootpath2
abort: path ends in directory separator: /
- [255]
+ [10]
Test empty path
---------------
@@ -197,7 +197,7 @@
$ echo '../sub = ../sub' >> .hgsub
$ hg ci -qAm 'add subrepo "../sub"'
abort: path contains illegal component: ../sub
- [255]
+ [10]
prepare tampered repo (including the commit above):
@@ -221,7 +221,7 @@
$ hg clone -q main main2
abort: path contains illegal component: ../sub
- [255]
+ [10]
$ cd ..
Test variable expansion
@@ -718,7 +718,7 @@
$ hg clone -q driveletter driveletter2
abort: path contains illegal component: X:
- [255]
+ [10]
#else
--- a/tests/test-bad-extension.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-bad-extension.t Fri Feb 18 14:27:43 2022 +0100
@@ -52,16 +52,18 @@
> EOF
$ hg -q help help 2>&1 |grep extension
- *** failed to import extension badext from $TESTTMP/badext.py: bit bucket overflow
- *** failed to import extension badext2: No module named *badext2* (glob)
+ *** failed to import extension "badext" from $TESTTMP/badext.py: bit bucket overflow
+ *** failed to import extension "badext2": No module named 'badext2' (py3 !)
+ *** failed to import extension "badext2": No module named badext2 (no-py3 !)
show traceback
$ hg -q help help --traceback 2>&1 | egrep ' extension|^Exception|Traceback|ImportError|ModuleNotFound'
- *** failed to import extension badext from $TESTTMP/badext.py: bit bucket overflow
+ *** failed to import extension "badext" from $TESTTMP/badext.py: bit bucket overflow
Traceback (most recent call last):
Exception: bit bucket overflow
- *** failed to import extension badext2: No module named *badext2* (glob)
+ *** failed to import extension "badext2": No module named 'badext2' (py3 !)
+ *** failed to import extension "badext2": No module named badext2 (no-py3 !)
Traceback (most recent call last):
ImportError: No module named badext2 (no-py3 !)
ImportError: No module named 'hgext.badext2' (py3 no-py36 !)
@@ -101,7 +103,7 @@
YYYY/MM/DD HH:MM:SS (PID)> - invoking registered callbacks: gpg
YYYY/MM/DD HH:MM:SS (PID)> > callbacks completed in * (glob)
YYYY/MM/DD HH:MM:SS (PID)> - loading extension: badext
- *** failed to import extension badext from $TESTTMP/badext.py: bit bucket overflow
+ *** failed to import extension "badext" from $TESTTMP/badext.py: bit bucket overflow
Traceback (most recent call last):
Exception: bit bucket overflow
YYYY/MM/DD HH:MM:SS (PID)> - loading extension: baddocext
@@ -123,7 +125,8 @@
Traceback (most recent call last): (py3 !)
ImportError: No module named 'hgext3rd.badext2' (py3 no-py36 !)
ModuleNotFoundError: No module named 'hgext3rd.badext2' (py36 !)
- *** failed to import extension badext2: No module named *badext2* (glob)
+ *** failed to import extension "badext2": No module named 'badext2' (py3 !)
+ *** failed to import extension "badext2": No module named badext2 (no-py3 !)
Traceback (most recent call last):
ImportError: No module named 'hgext.badext2' (py3 no-py36 !)
ModuleNotFoundError: No module named 'hgext.badext2' (py36 !)
@@ -160,8 +163,9 @@
confirm that there's no crash when an extension's documentation is bad
$ hg help --keyword baddocext
- *** failed to import extension badext from $TESTTMP/badext.py: bit bucket overflow
- *** failed to import extension badext2: No module named *badext2* (glob)
+ *** failed to import extension "badext" from $TESTTMP/badext.py: bit bucket overflow
+ *** failed to import extension "badext2": No module named 'badext2' (py3 !)
+ *** failed to import extension "badext2": No module named badext2 (no-py3 !)
Topics:
extensions Using Additional Features
--- a/tests/test-basic.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-basic.t Fri Feb 18 14:27:43 2022 +0100
@@ -40,7 +40,7 @@
A a
$ hg status >/dev/full
- abort: No space left on device
+ abort: No space left on device* (glob)
[255]
#endif
--- a/tests/test-blackbox.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-blackbox.t Fri Feb 18 14:27:43 2022 +0100
@@ -32,10 +32,10 @@
$ echo a > a
$ hg add a
$ hg blackbox --config blackbox.dirty=True
- 1970/01/01 00:00:00 bob @0000000000000000000000000000000000000000 (5000)> init blackboxtest exited 0 after * seconds (glob)
- 1970/01/01 00:00:00 bob @0000000000000000000000000000000000000000 (5000)> add a
- 1970/01/01 00:00:00 bob @0000000000000000000000000000000000000000 (5000)> add a exited 0 after * seconds (glob)
- 1970/01/01 00:00:00 bob @0000000000000000000000000000000000000000+ (5000)> blackbox --config *blackbox.dirty=True* (glob)
+ 1970-01-01 00:00:00.000 bob @0000000000000000000000000000000000000000 (5000)> init blackboxtest exited 0 after * seconds (glob)
+ 1970-01-01 00:00:00.000 bob @0000000000000000000000000000000000000000 (5000)> add a
+ 1970-01-01 00:00:00.000 bob @0000000000000000000000000000000000000000 (5000)> add a exited 0 after * seconds (glob)
+ 1970-01-01 00:00:00.000 bob @0000000000000000000000000000000000000000+ (5000)> blackbox --config *blackbox.dirty=True* (glob)
failure exit code
$ rm ./.hg/blackbox.log
@@ -43,17 +43,17 @@
non-existent: $ENOENT$
[1]
$ hg blackbox
- 1970/01/01 00:00:00 bob @0000000000000000000000000000000000000000 (5000)> add non-existent
- 1970/01/01 00:00:00 bob @0000000000000000000000000000000000000000 (5000)> add non-existent exited 1 after * seconds (glob)
- 1970/01/01 00:00:00 bob @0000000000000000000000000000000000000000 (5000)> blackbox
+ 1970-01-01 00:00:00.000 bob @0000000000000000000000000000000000000000 (5000)> add non-existent
+ 1970-01-01 00:00:00.000 bob @0000000000000000000000000000000000000000 (5000)> add non-existent exited 1 after * seconds (glob)
+ 1970-01-01 00:00:00.000 bob @0000000000000000000000000000000000000000 (5000)> blackbox
abort exit code
$ rm ./.hg/blackbox.log
$ hg abortcmd 2> /dev/null
[255]
$ hg blackbox -l 2
- 1970/01/01 00:00:00 bob @0000000000000000000000000000000000000000 (5000)> abortcmd exited 255 after * seconds (glob)
- 1970/01/01 00:00:00 bob @0000000000000000000000000000000000000000 (5000)> blackbox -l 2
+ 1970-01-01 00:00:00.000 bob @0000000000000000000000000000000000000000 (5000)> abortcmd exited 255 after * seconds (glob)
+ 1970-01-01 00:00:00.000 bob @0000000000000000000000000000000000000000 (5000)> blackbox -l 2
unhandled exception
$ rm ./.hg/blackbox.log
@@ -67,27 +67,27 @@
[1]
#endif
$ hg blackbox -l 2
- 1970/01/01 00:00:00 bob @0000000000000000000000000000000000000000 (5000)> crash exited 1 after * seconds (glob)
- 1970/01/01 00:00:00 bob @0000000000000000000000000000000000000000 (5000)> blackbox -l 2
+ 1970-01-01 00:00:00.000 bob @0000000000000000000000000000000000000000 (5000)> crash exited 1 after * seconds (glob)
+ 1970-01-01 00:00:00.000 bob @0000000000000000000000000000000000000000 (5000)> blackbox -l 2
alias expansion is logged
$ rm ./.hg/blackbox.log
$ hg confuse
$ hg blackbox
- 1970/01/01 00:00:00 bob @0000000000000000000000000000000000000000 (5000)> confuse
- 1970/01/01 00:00:00 bob @0000000000000000000000000000000000000000 (5000)> alias 'confuse' expands to 'log --limit 3'
- 1970/01/01 00:00:00 bob @0000000000000000000000000000000000000000 (5000)> confuse exited 0 after * seconds (glob)
- 1970/01/01 00:00:00 bob @0000000000000000000000000000000000000000 (5000)> blackbox
+ 1970-01-01 00:00:00.000 bob @0000000000000000000000000000000000000000 (5000)> confuse
+ 1970-01-01 00:00:00.000 bob @0000000000000000000000000000000000000000 (5000)> alias 'confuse' expands to 'log --limit 3'
+ 1970-01-01 00:00:00.000 bob @0000000000000000000000000000000000000000 (5000)> confuse exited 0 after * seconds (glob)
+ 1970-01-01 00:00:00.000 bob @0000000000000000000000000000000000000000 (5000)> blackbox
recursive aliases work correctly
$ rm ./.hg/blackbox.log
$ hg so-confusing
$ hg blackbox
- 1970/01/01 00:00:00 bob @0000000000000000000000000000000000000000 (5000)> so-confusing
- 1970/01/01 00:00:00 bob @0000000000000000000000000000000000000000 (5000)> alias 'so-confusing' expands to 'confuse --style compact'
- 1970/01/01 00:00:00 bob @0000000000000000000000000000000000000000 (5000)> alias 'confuse' expands to 'log --limit 3'
- 1970/01/01 00:00:00 bob @0000000000000000000000000000000000000000 (5000)> so-confusing exited 0 after * seconds (glob)
- 1970/01/01 00:00:00 bob @0000000000000000000000000000000000000000 (5000)> blackbox
+ 1970-01-01 00:00:00.000 bob @0000000000000000000000000000000000000000 (5000)> so-confusing
+ 1970-01-01 00:00:00.000 bob @0000000000000000000000000000000000000000 (5000)> alias 'so-confusing' expands to 'confuse --style compact'
+ 1970-01-01 00:00:00.000 bob @0000000000000000000000000000000000000000 (5000)> alias 'confuse' expands to 'log --limit 3'
+ 1970-01-01 00:00:00.000 bob @0000000000000000000000000000000000000000 (5000)> so-confusing exited 0 after * seconds (glob)
+ 1970-01-01 00:00:00.000 bob @0000000000000000000000000000000000000000 (5000)> blackbox
custom date format
$ rm ./.hg/blackbox.log
@@ -97,7 +97,7 @@
$ hg blackbox
2012-04-13 @ 20:13:13 bob @0000000000000000000000000000000000000000 (5000)> --config *blackbox.date-format=%Y-%m-%d @ %H:%M:%S* --config *devel.default-date=1334347993 0* --traceback status (glob)
2012-04-13 @ 20:13:13 bob @0000000000000000000000000000000000000000 (5000)> --config *blackbox.date-format=%Y-%m-%d @ %H:%M:%S* --config *devel.default-date=1334347993 0* --traceback status exited 0 after * seconds (glob)
- 1970/01/01 00:00:00 bob @0000000000000000000000000000000000000000 (5000)> blackbox
+ 1970-01-01 00:00:00.000 bob @0000000000000000000000000000000000000000 (5000)> blackbox
incoming change tracking
@@ -128,12 +128,12 @@
new changesets d02f48003e62
(run 'hg update' to get a working copy)
$ hg blackbox -l 6
- 1970/01/01 00:00:00 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> wrote branch cache (served) with 1 labels and 2 nodes
- 1970/01/01 00:00:00 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> updated branch cache (served.hidden) in * seconds (glob)
- 1970/01/01 00:00:00 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> wrote branch cache (served.hidden) with 1 labels and 2 nodes
- 1970/01/01 00:00:00 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> 1 incoming changes - new heads: d02f48003e62
- 1970/01/01 00:00:00 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> pull exited 0 after * seconds (glob)
- 1970/01/01 00:00:00 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> blackbox -l 6
+ 1970-01-01 00:00:00.000 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> wrote branch cache (served) with 1 labels and 2 nodes
+ 1970-01-01 00:00:00.000 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> updated branch cache (served.hidden) in * seconds (glob)
+ 1970-01-01 00:00:00.000 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> wrote branch cache (served.hidden) with 1 labels and 2 nodes
+ 1970-01-01 00:00:00.000 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> 1 incoming changes - new heads: d02f48003e62
+ 1970-01-01 00:00:00.000 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> pull exited 0 after * seconds (glob)
+ 1970-01-01 00:00:00.000 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> blackbox -l 6
we must not cause a failure if we cannot write to the log
@@ -191,12 +191,12 @@
0 files updated, 0 files merged, 1 files removed, 0 files unresolved
saved backup bundle to $TESTTMP/blackboxtest2/.hg/strip-backup/*-backup.hg (glob)
$ hg blackbox -l 6
- 1970/01/01 00:00:00 bob @73f6ee326b27d820b0472f1a825e3a50f3dc489b (5000)> strip tip
- 1970/01/01 00:00:00 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> saved backup bundle to $TESTTMP/blackboxtest2/.hg/strip-backup/73f6ee326b27-7612e004-backup.hg
- 1970/01/01 00:00:00 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> updated branch cache (immutable) in * seconds (glob)
- 1970/01/01 00:00:00 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> wrote branch cache (immutable) with 1 labels and 2 nodes
- 1970/01/01 00:00:00 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> strip tip exited 0 after * seconds (glob)
- 1970/01/01 00:00:00 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> blackbox -l 6
+ 1970-01-01 00:00:00.000 bob @73f6ee326b27d820b0472f1a825e3a50f3dc489b (5000)> strip tip
+ 1970-01-01 00:00:00.000 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> saved backup bundle to $TESTTMP/blackboxtest2/.hg/strip-backup/73f6ee326b27-7612e004-backup.hg
+ 1970-01-01 00:00:00.000 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> updated branch cache (immutable) in * seconds (glob)
+ 1970-01-01 00:00:00.000 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> wrote branch cache (immutable) with 1 labels and 2 nodes
+ 1970-01-01 00:00:00.000 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> strip tip exited 0 after * seconds (glob)
+ 1970-01-01 00:00:00.000 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> blackbox -l 6
extension and python hooks - use the eol extension for a pythonhook
@@ -217,12 +217,12 @@
> eol=!
> EOF
$ hg blackbox -l 5
- 1970/01/01 00:00:00 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> update (no-chg !)
- 1970/01/01 00:00:00 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> pythonhook-preupdate: hgext.eol.preupdate finished in * seconds (glob)
- 1970/01/01 00:00:00 bob @d02f48003e62c24e2659d97d30f2a83abe5d5d51 (5000)> exthook-update: echo hooked finished in * seconds (glob)
- 1970/01/01 00:00:00 bob @d02f48003e62c24e2659d97d30f2a83abe5d5d51 (5000)> update exited 0 after * seconds (glob)
- 1970/01/01 00:00:00 bob @d02f48003e62c24e2659d97d30f2a83abe5d5d51 (5000)> serve --no-profile --cmdserver chgunix --address $TESTTMP.chgsock/server.* --daemon-postexec 'chdir:/' (glob) (chg !)
- 1970/01/01 00:00:00 bob @d02f48003e62c24e2659d97d30f2a83abe5d5d51 (5000)> blackbox -l 5
+ 1970-01-01 00:00:00.000 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> update (no-chg !)
+ 1970-01-01 00:00:00.000 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> pythonhook-preupdate: hgext.eol.preupdate finished in * seconds (glob)
+ 1970-01-01 00:00:00.000 bob @d02f48003e62c24e2659d97d30f2a83abe5d5d51 (5000)> exthook-update: echo hooked finished in * seconds (glob)
+ 1970-01-01 00:00:00.000 bob @d02f48003e62c24e2659d97d30f2a83abe5d5d51 (5000)> update exited 0 after * seconds (glob)
+ 1970-01-01 00:00:00.000 bob @d02f48003e62c24e2659d97d30f2a83abe5d5d51 (5000)> serve --no-profile --cmdserver chgunix --address $TESTTMP.chgsock/server.* --daemon-postexec 'chdir:/' (glob) (chg !)
+ 1970-01-01 00:00:00.000 bob @d02f48003e62c24e2659d97d30f2a83abe5d5d51 (5000)> blackbox -l 5
log rotation
@@ -243,8 +243,8 @@
$ hg init blackboxtest3
$ cd blackboxtest3
$ hg blackbox
- 1970/01/01 00:00:00 bob @0000000000000000000000000000000000000000 (5000)> init blackboxtest3 exited 0 after * seconds (glob)
- 1970/01/01 00:00:00 bob @0000000000000000000000000000000000000000 (5000)> blackbox
+ 1970-01-01 00:00:00.000 bob @0000000000000000000000000000000000000000 (5000)> init blackboxtest3 exited 0 after * seconds (glob)
+ 1970-01-01 00:00:00.000 bob @0000000000000000000000000000000000000000 (5000)> blackbox
$ mv .hg/blackbox.log .hg/blackbox.log-
$ mkdir .hg/blackbox.log
$ sed -e 's/\(.*test1.*\)/#\1/; s#\(.*commit2.*\)#os.rmdir(".hg/blackbox.log")\
@@ -306,16 +306,16 @@
result: 0
$ hg blackbox
- 1970/01/01 00:00:00 bob @45589e459b2edfbf3dbde7e01f611d2c1e7453d7 (5000)> updating the branch cache
- 1970/01/01 00:00:00 bob @45589e459b2edfbf3dbde7e01f611d2c1e7453d7 (5000)> updated branch cache (served) in * seconds (glob)
- 1970/01/01 00:00:00 bob @45589e459b2edfbf3dbde7e01f611d2c1e7453d7 (5000)> wrote branch cache (served) with 1 labels and 1 nodes
- 1970/01/01 00:00:00 bob @45589e459b2edfbf3dbde7e01f611d2c1e7453d7 (5000)> --debug commit -m commit2 -d 2000-01-02 foo exited 0 after *.?? seconds (glob)
- 1970/01/01 00:00:00 bob @45589e459b2edfbf3dbde7e01f611d2c1e7453d7 (5000)> --debug log -r 0
- 1970/01/01 00:00:00 bob @45589e459b2edfbf3dbde7e01f611d2c1e7453d7 (5000)> writing .hg/cache/tags2-visible with 0 tags
- 1970/01/01 00:00:00 bob @45589e459b2edfbf3dbde7e01f611d2c1e7453d7 (5000)> --debug log -r 0 exited 0 after *.?? seconds (glob)
- 1970/01/01 00:00:00 bob @45589e459b2edfbf3dbde7e01f611d2c1e7453d7 (5000)> --debug log -r tip
- 1970/01/01 00:00:00 bob @45589e459b2edfbf3dbde7e01f611d2c1e7453d7 (5000)> --debug log -r tip exited 0 after *.?? seconds (glob)
- 1970/01/01 00:00:00 bob @45589e459b2edfbf3dbde7e01f611d2c1e7453d7 (5000)> blackbox
+ 1970-01-01 00:00:00.000 bob @45589e459b2edfbf3dbde7e01f611d2c1e7453d7 (5000)> updating the branch cache
+ 1970-01-01 00:00:00.000 bob @45589e459b2edfbf3dbde7e01f611d2c1e7453d7 (5000)> updated branch cache (served) in * seconds (glob)
+ 1970-01-01 00:00:00.000 bob @45589e459b2edfbf3dbde7e01f611d2c1e7453d7 (5000)> wrote branch cache (served) with 1 labels and 1 nodes
+ 1970-01-01 00:00:00.000 bob @45589e459b2edfbf3dbde7e01f611d2c1e7453d7 (5000)> --debug commit -m commit2 -d 2000-01-02 foo exited 0 after *.?? seconds (glob)
+ 1970-01-01 00:00:00.000 bob @45589e459b2edfbf3dbde7e01f611d2c1e7453d7 (5000)> --debug log -r 0
+ 1970-01-01 00:00:00.000 bob @45589e459b2edfbf3dbde7e01f611d2c1e7453d7 (5000)> writing .hg/cache/tags2-visible with 0 tags
+ 1970-01-01 00:00:00.000 bob @45589e459b2edfbf3dbde7e01f611d2c1e7453d7 (5000)> --debug log -r 0 exited 0 after *.?? seconds (glob)
+ 1970-01-01 00:00:00.000 bob @45589e459b2edfbf3dbde7e01f611d2c1e7453d7 (5000)> --debug log -r tip
+ 1970-01-01 00:00:00.000 bob @45589e459b2edfbf3dbde7e01f611d2c1e7453d7 (5000)> --debug log -r tip exited 0 after *.?? seconds (glob)
+ 1970-01-01 00:00:00.000 bob @45589e459b2edfbf3dbde7e01f611d2c1e7453d7 (5000)> blackbox
Skip rotation if the .hg is read-only
@@ -394,8 +394,8 @@
(only look for entries with specific logged sources, otherwise this test is
pretty brittle)
$ hg blackbox | egrep '\[command(finish)?\]'
- 1970/01/01 00:00:00 bob @0000000000000000000000000000000000000000 (5000) [commandfinish]> --config *blackbox.track=* --config *blackbox.logsource=True* init track_star exited 0 after * seconds (glob)
- 1970/01/01 00:00:00 bob @0000000000000000000000000000000000000000 (5000) [command]> blackbox
+ 1970-01-01 00:00:00.000 bob @0000000000000000000000000000000000000000 (5000) [commandfinish]> --config *blackbox.track=* --config *blackbox.logsource=True* init track_star exited 0 after * seconds (glob)
+ 1970-01-01 00:00:00.000 bob @0000000000000000000000000000000000000000 (5000) [command]> blackbox
$ cd $TESTTMP
#if chg
@@ -493,7 +493,7 @@
#endif
$ head -1 .hg/blackbox.log
- 1970/01/01 00:00:00 bob @0000000000000000000000000000000000000000 (5000)> ** Unknown exception encountered with possibly-broken third-party extension "mock" (version N/A)
+ 1970-01-01 00:00:00.000 bob @0000000000000000000000000000000000000000 (5000)> ** Unknown exception encountered with possibly-broken third-party extension "mock" (version N/A)
$ tail -2 .hg/blackbox.log
RuntimeError: raise
--- a/tests/test-bookmarks-current.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-bookmarks-current.t Fri Feb 18 14:27:43 2022 +0100
@@ -245,4 +245,4 @@
$ hg bookmarks --inactive
$ hg bookmarks -ql .
abort: no active bookmark
- [255]
+ [10]
--- a/tests/test-bookmarks-pushpull.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-bookmarks-pushpull.t Fri Feb 18 14:27:43 2022 +0100
@@ -357,7 +357,7 @@
(leaving bookmark V)
$ hg push -B . ../a
abort: no active bookmark
- [255]
+ [10]
$ hg update -r V
0 files updated, 0 files merged, 1 files removed, 0 files unresolved
(activating bookmark V)
@@ -715,14 +715,15 @@
$ cat <<EOF > ../lookuphook.py
> """small extensions adding a hook after wireprotocol lookup to test race"""
> import functools
- > from mercurial import wireprotov1server, wireprotov2server
+ > from mercurial import wireprotov1server
>
> def wrappedlookup(orig, repo, *args, **kwargs):
> ret = orig(repo, *args, **kwargs)
> repo.hook(b'lookup')
> return ret
- > for table in [wireprotov1server.commands, wireprotov2server.COMMANDS]:
- > table[b'lookup'].func = functools.partial(wrappedlookup, table[b'lookup'].func)
+ >
+ > table = wireprotov1server.commands
+ > table[b'lookup'].func = functools.partial(wrappedlookup, table[b'lookup'].func)
> EOF
$ cat <<EOF > ../pull-race/.hg/hgrc
> [extensions]
--- a/tests/test-bookmarks.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-bookmarks.t Fri Feb 18 14:27:43 2022 +0100
@@ -278,7 +278,7 @@
$ hg book -i rename-me
$ hg book -m . renamed
abort: no active bookmark
- [255]
+ [10]
$ hg up -q Y
$ hg book -d rename-me
@@ -298,7 +298,7 @@
$ hg book -i delete-me
$ hg book -d .
abort: no active bookmark
- [255]
+ [10]
$ hg up -q Y
$ hg book -d delete-me
--- a/tests/test-branch-change.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-branch-change.t Fri Feb 18 14:27:43 2022 +0100
@@ -185,6 +185,7 @@
changed branch on 2 changesets
updating the branch cache
invalid branch cache (served): tip differs
+ invalid branch cache (served.hidden): tip differs
$ hg glog -r '(.^)::'
@ 9:de1404b45a69 Added e
@@ -211,7 +212,7 @@
secret 11:38a9b2d53f98
foo 7:8a4729a5e2b8
wat 9:de1404b45a69 (inactive)
- default 2:28ad74487de9 (inactive)
+ default 1:29becc82797a (inactive)
$ hg branch
secret
--- a/tests/test-branch-option.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-branch-option.t Fri Feb 18 14:27:43 2022 +0100
@@ -58,12 +58,12 @@
$ hg in -qbz
abort: unknown branch 'z'
- [255]
+ [10]
$ hg in -q ../branch#z
2:f25d57ab0566
$ hg out -qbz
abort: unknown branch 'z'
- [255]
+ [10]
in rev c branch a
--- a/tests/test-bundle.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-bundle.t Fri Feb 18 14:27:43 2022 +0100
@@ -292,32 +292,56 @@
packed1 is produced properly
-#if reporevlogstore
+
+#if reporevlogstore rust
$ hg -R test debugcreatestreamclonebundle packed.hg
- writing 2664 bytes for 6 files (no-zstd !)
- writing 2665 bytes for 6 files (zstd !)
- bundle requirements: generaldelta, revlogv1, sparserevlog (no-rust !)
- bundle requirements: generaldelta, persistent-nodemap, revlogv1, sparserevlog (rust !)
+ writing 2665 bytes for 6 files
+ bundle requirements: generaldelta, revlog-compression-zstd, revlogv1, sparserevlog
+
+ $ f -B 64 --size --sha1 --hexdump packed.hg
+ packed.hg: size=2865, sha1=353d10311f4befa195d9a1ca4b8e26518115c702
+ 0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 06 00 00 |HGS1UN..........|
+ 0010: 00 00 00 00 0a 69 00 3b 67 65 6e 65 72 61 6c 64 |.....i.;generald|
+ 0020: 65 6c 74 61 2c 72 65 76 6c 6f 67 2d 63 6f 6d 70 |elta,revlog-comp|
+ 0030: 72 65 73 73 69 6f 6e 2d 7a 73 74 64 2c 72 65 76 |ression-zstd,rev|
+ $ hg debugbundle --spec packed.hg
+ none-packed1;requirements%3Dgeneraldelta%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog
+#endif
+
+#if reporevlogstore no-rust zstd
+
+ $ hg -R test debugcreatestreamclonebundle packed.hg
+ writing 2665 bytes for 6 files
+ bundle requirements: generaldelta, revlog-compression-zstd, revlogv1, sparserevlog
$ f -B 64 --size --sha1 --hexdump packed.hg
- packed.hg: size=2840, sha1=12bf3eee3eb8a04c503ce2d29b48f0135c7edff5 (no-zstd !)
- packed.hg: size=2841, sha1=8b645a65f49b0ae43042a9f3da56d4bfdf1c7f99 (zstd no-rust !)
- packed.hg: size=2860, sha1=81d7a2e535892cda51e82c200f818de2cca828d3 (rust !)
+ packed.hg: size=2865, sha1=353d10311f4befa195d9a1ca4b8e26518115c702
0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 06 00 00 |HGS1UN..........|
- 0010: 00 00 00 00 0a 68 00 23 67 65 6e 65 72 61 6c 64 |.....h.#generald| (no-zstd !)
- 0020: 65 6c 74 61 2c 72 65 76 6c 6f 67 76 31 2c 73 70 |elta,revlogv1,sp| (no-zstd !)
- 0030: 61 72 73 65 72 65 76 6c 6f 67 00 64 61 74 61 2f |arserevlog.data/| (no-zstd !)
- 0010: 00 00 00 00 0a 69 00 23 67 65 6e 65 72 61 6c 64 |.....i.#generald| (zstd no-rust !)
- 0020: 65 6c 74 61 2c 72 65 76 6c 6f 67 76 31 2c 73 70 |elta,revlogv1,sp| (zstd no-rust !)
- 0030: 61 72 73 65 72 65 76 6c 6f 67 00 64 61 74 61 2f |arserevlog.data/| (zstd no-rust !)
- 0010: 00 00 00 00 0a 69 00 36 67 65 6e 65 72 61 6c 64 |.....i.6generald| (rust !)
- 0020: 65 6c 74 61 2c 70 65 72 73 69 73 74 65 6e 74 2d |elta,persistent-| (rust !)
- 0030: 6e 6f 64 65 6d 61 70 2c 72 65 76 6c 6f 67 76 31 |nodemap,revlogv1| (rust !)
+ 0010: 00 00 00 00 0a 69 00 3b 67 65 6e 65 72 61 6c 64 |.....i.;generald|
+ 0020: 65 6c 74 61 2c 72 65 76 6c 6f 67 2d 63 6f 6d 70 |elta,revlog-comp|
+ 0030: 72 65 73 73 69 6f 6e 2d 7a 73 74 64 2c 72 65 76 |ression-zstd,rev|
+ $ hg debugbundle --spec packed.hg
+ none-packed1;requirements%3Dgeneraldelta%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog
+#endif
+
+#if reporevlogstore no-rust no-zstd
+ $ hg -R test debugcreatestreamclonebundle packed.hg
+ writing 2664 bytes for 6 files
+ bundle requirements: generaldelta, revlogv1, sparserevlog
+
+ $ f -B 64 --size --sha1 --hexdump packed.hg
+ packed.hg: size=2840, sha1=12bf3eee3eb8a04c503ce2d29b48f0135c7edff5
+ 0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 06 00 00 |HGS1UN..........|
+ 0010: 00 00 00 00 0a 68 00 23 67 65 6e 65 72 61 6c 64 |.....h.#generald|
+ 0020: 65 6c 74 61 2c 72 65 76 6c 6f 67 76 31 2c 73 70 |elta,revlogv1,sp|
+ 0030: 61 72 73 65 72 65 76 6c 6f 67 00 64 61 74 61 2f |arserevlog.data/|
$ hg debugbundle --spec packed.hg
- none-packed1;requirements%3Dgeneraldelta%2Crevlogv1%2Csparserevlog (no-rust !)
- none-packed1;requirements%3Dgeneraldelta%2Cpersistent-nodemap%2Crevlogv1%2Csparserevlog (rust !)
+ none-packed1;requirements%3Dgeneraldelta%2Crevlogv1%2Csparserevlog
+#endif
+
+#if reporevlogstore
generaldelta requirement is not listed in stream clone bundles unless used
@@ -326,25 +350,66 @@
$ touch foo
$ hg -q commit -A -m initial
$ cd ..
+
+#endif
+
+#if reporevlogstore rust
+
$ hg -R testnongd debugcreatestreamclonebundle packednongd.hg
writing 301 bytes for 3 files
- bundle requirements: revlogv1 (no-rust !)
- bundle requirements: persistent-nodemap, revlogv1 (rust !)
+ bundle requirements: revlog-compression-zstd, revlogv1
+
+ $ f -B 64 --size --sha1 --hexdump packednongd.hg
+ packednongd.hg: size=407, sha1=0b8714422b785ba8eb98c916b41ffd5fb994c9b5
+ 0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 03 00 00 |HGS1UN..........|
+ 0010: 00 00 00 00 01 2d 00 21 72 65 76 6c 6f 67 2d 63 |.....-.!revlog-c|
+ 0020: 6f 6d 70 72 65 73 73 69 6f 6e 2d 7a 73 74 64 2c |ompression-zstd,|
+ 0030: 72 65 76 6c 6f 67 76 31 00 64 61 74 61 2f 66 6f |revlogv1.data/fo|
+
+ $ hg debugbundle --spec packednongd.hg
+ none-packed1;requirements%3Drevlog-compression-zstd%2Crevlogv1
+
+#endif
+
+#if reporevlogstore no-rust zstd
+
+ $ hg -R testnongd debugcreatestreamclonebundle packednongd.hg
+ writing 301 bytes for 3 files
+ bundle requirements: revlog-compression-zstd, revlogv1
$ f -B 64 --size --sha1 --hexdump packednongd.hg
- packednongd.hg: size=383, sha1=1d9c230238edd5d38907100b729ba72b1831fe6f (no-rust !)
- packednongd.hg: size=402, sha1=d3cc1417f0e8142cf9340aaaa520b660ad3ec3ea (rust !)
+ packednongd.hg: size=407, sha1=0b8714422b785ba8eb98c916b41ffd5fb994c9b5
0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 03 00 00 |HGS1UN..........|
- 0010: 00 00 00 00 01 2d 00 09 72 65 76 6c 6f 67 76 31 |.....-..revlogv1| (no-rust !)
- 0020: 00 64 61 74 61 2f 66 6f 6f 2e 69 00 36 34 0a 00 |.data/foo.i.64..| (no-rust !)
- 0030: 01 00 01 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| (no-rust !)
- 0010: 00 00 00 00 01 2d 00 1c 70 65 72 73 69 73 74 65 |.....-..persiste| (rust !)
- 0020: 6e 74 2d 6e 6f 64 65 6d 61 70 2c 72 65 76 6c 6f |nt-nodemap,revlo| (rust !)
- 0030: 67 76 31 00 64 61 74 61 2f 66 6f 6f 2e 69 00 36 |gv1.data/foo.i.6| (rust !)
+ 0010: 00 00 00 00 01 2d 00 21 72 65 76 6c 6f 67 2d 63 |.....-.!revlog-c|
+ 0020: 6f 6d 70 72 65 73 73 69 6f 6e 2d 7a 73 74 64 2c |ompression-zstd,|
+ 0030: 72 65 76 6c 6f 67 76 31 00 64 61 74 61 2f 66 6f |revlogv1.data/fo|
$ hg debugbundle --spec packednongd.hg
- none-packed1;requirements%3Drevlogv1 (no-rust !)
- none-packed1;requirements%3Dpersistent-nodemap%2Crevlogv1 (rust !)
+ none-packed1;requirements%3Drevlog-compression-zstd%2Crevlogv1
+
+
+#endif
+
+#if reporevlogstore no-rust no-zstd
+
+ $ hg -R testnongd debugcreatestreamclonebundle packednongd.hg
+ writing 301 bytes for 3 files
+ bundle requirements: revlogv1
+
+ $ f -B 64 --size --sha1 --hexdump packednongd.hg
+ packednongd.hg: size=383, sha1=1d9c230238edd5d38907100b729ba72b1831fe6f
+ 0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 03 00 00 |HGS1UN..........|
+ 0010: 00 00 00 00 01 2d 00 09 72 65 76 6c 6f 67 76 31 |.....-..revlogv1|
+ 0020: 00 64 61 74 61 2f 66 6f 6f 2e 69 00 36 34 0a 00 |.data/foo.i.64..|
+ 0030: 01 00 01 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
+
+ $ hg debugbundle --spec packednongd.hg
+ none-packed1;requirements%3Drevlogv1
+
+
+#endif
+
+#if reporevlogstore
Warning emitted when packed bundles contain secret changesets
@@ -355,11 +420,36 @@
$ hg phase --force --secret -r .
$ cd ..
+#endif
+
+#if reporevlogstore rust
+
$ hg -R testsecret debugcreatestreamclonebundle packedsecret.hg
(warning: stream clone bundle will contain secret revisions)
writing 301 bytes for 3 files
- bundle requirements: generaldelta, revlogv1, sparserevlog (no-rust !)
- bundle requirements: generaldelta, persistent-nodemap, revlogv1, sparserevlog (rust !)
+ bundle requirements: generaldelta, revlog-compression-zstd, revlogv1, sparserevlog
+
+#endif
+
+#if reporevlogstore no-rust zstd
+
+ $ hg -R testsecret debugcreatestreamclonebundle packedsecret.hg
+ (warning: stream clone bundle will contain secret revisions)
+ writing 301 bytes for 3 files
+ bundle requirements: generaldelta, revlog-compression-zstd, revlogv1, sparserevlog
+
+#endif
+
+#if reporevlogstore no-rust no-zstd
+
+ $ hg -R testsecret debugcreatestreamclonebundle packedsecret.hg
+ (warning: stream clone bundle will contain secret revisions)
+ writing 301 bytes for 3 files
+ bundle requirements: generaldelta, revlogv1, sparserevlog
+
+#endif
+
+#if reporevlogstore
Unpacking packed1 bundles with "hg unbundle" isn't allowed
@@ -716,7 +806,7 @@
$ hg incoming '../test#bundle.hg'
comparing with ../test
abort: unknown revision 'bundle.hg'
- [255]
+ [10]
note that percent encoding is not handled:
--- a/tests/test-bundle2-exchange.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-bundle2-exchange.t Fri Feb 18 14:27:43 2022 +0100
@@ -1,13 +1,3 @@
-#testcases sshv1 sshv2
-
-#if sshv2
- $ cat >> $HGRCPATH << EOF
- > [experimental]
- > sshpeer.advertise-v2 = true
- > sshserver.support-v2 = true
- > EOF
-#endif
-
Test exchange of common information using bundle2
--- a/tests/test-bundle2-pushback.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-bundle2-pushback.t Fri Feb 18 14:27:43 2022 +0100
@@ -1,13 +1,3 @@
-#testcases sshv1 sshv2
-
-#if sshv2
- $ cat >> $HGRCPATH << EOF
- > [experimental]
- > sshpeer.advertise-v2 = true
- > sshserver.support-v2 = true
- > EOF
-#endif
-
$ cat > bundle2.py << EOF
> """A small extension to test bundle2 pushback parts.
> Current bundle2 implementation doesn't provide a way to generate those
--- a/tests/test-bundle2-remote-changegroup.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-bundle2-remote-changegroup.t Fri Feb 18 14:27:43 2022 +0100
@@ -1,13 +1,3 @@
-#testcases sshv1 sshv2
-
-#if sshv2
- $ cat >> $HGRCPATH << EOF
- > [experimental]
- > sshpeer.advertise-v2 = true
- > sshserver.support-v2 = true
- > EOF
-#endif
-
Create an extension to test bundle2 remote-changegroup parts
$ cat > bundle2.py << EOF
--- a/tests/test-casecollision.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-casecollision.t Fri Feb 18 14:27:43 2022 +0100
@@ -12,7 +12,7 @@
? A
$ hg add --config ui.portablefilenames=abort A
abort: possible case-folding collision for A
- [255]
+ [20]
$ hg st
A a
? A
--- a/tests/test-check-code.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-check-code.t Fri Feb 18 14:27:43 2022 +0100
@@ -33,7 +33,7 @@
Skipping contrib/packaging/hgpackaging/wix.py it has no-che?k-code (glob)
Skipping i18n/polib.py it has no-che?k-code (glob)
Skipping mercurial/statprof.py it has no-che?k-code (glob)
- Skipping tests/badserverext.py it has no-che?k-code (glob)
+ Skipping tests/testlib/badserverext.py it has no-che?k-code (glob)
@commands in debugcommands.py should be in alphabetical order.
--- a/tests/test-check-interfaces.py Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-check-interfaces.py Fri Feb 18 14:27:43 2022 +0100
@@ -39,7 +39,6 @@
wireprotoserver,
wireprototypes,
wireprotov1peer,
- wireprotov2server,
)
testdir = os.path.dirname(__file__)
@@ -129,9 +128,6 @@
ziverify.verifyClass(repository.ipeerbase, httppeer.httppeer)
checkzobject(httppeer.httppeer(None, None, None, dummyopener(), None, None))
- ziverify.verifyClass(repository.ipeerv2, httppeer.httpv2peer)
- checkzobject(httppeer.httpv2peer(None, b'', b'', None, None, None))
-
ziverify.verifyClass(repository.ipeerbase, localrepo.localpeer)
checkzobject(localrepo.localpeer(dummyrepo()))
@@ -158,19 +154,6 @@
)
)
- ziverify.verifyClass(repository.ipeerbase, sshpeer.sshv2peer)
- checkzobject(
- sshpeer.sshv2peer(
- ui,
- b'ssh://localhost/foo',
- b'',
- dummypipe(),
- dummypipe(),
- None,
- None,
- )
- )
-
ziverify.verifyClass(repository.ipeerbase, bundlerepo.bundlepeer)
checkzobject(bundlerepo.bundlepeer(dummyrepo()))
@@ -193,26 +176,15 @@
wireprototypes.baseprotocolhandler, wireprotoserver.sshv1protocolhandler
)
ziverify.verifyClass(
- wireprototypes.baseprotocolhandler, wireprotoserver.sshv2protocolhandler
- )
- ziverify.verifyClass(
wireprototypes.baseprotocolhandler,
wireprotoserver.httpv1protocolhandler,
)
- ziverify.verifyClass(
- wireprototypes.baseprotocolhandler,
- wireprotov2server.httpv2protocolhandler,
- )
sshv1 = wireprotoserver.sshv1protocolhandler(None, None, None)
checkzobject(sshv1)
- sshv2 = wireprotoserver.sshv2protocolhandler(None, None, None)
- checkzobject(sshv2)
httpv1 = wireprotoserver.httpv1protocolhandler(None, None, None)
checkzobject(httpv1)
- httpv2 = wireprotov2server.httpv2protocolhandler(None, None)
- checkzobject(httpv2)
ziverify.verifyClass(repository.ifilestorage, filelog.filelog)
ziverify.verifyClass(repository.imanifestdict, manifest.manifestdict)
--- a/tests/test-check-module-imports.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-check-module-imports.t Fri Feb 18 14:27:43 2022 +0100
@@ -41,4 +41,5 @@
> -X tests/test-demandimport.py \
> -X tests/test-imports-checker.t \
> -X tests/test-verify-repo-operations.py \
+ > -X tests/test-extension.t \
> | sed 's-\\-/-g' | "$PYTHON" "$import_checker" -
--- a/tests/test-check-pytype.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-check-pytype.t Fri Feb 18 14:27:43 2022 +0100
@@ -10,94 +10,63 @@
probably hiding real problems.
mercurial/bundlerepo.py # no vfs and ui attrs on bundlerepo
-mercurial/changegroup.py # mysterious incorrect type detection
-mercurial/chgserver.py # [attribute-error]
-mercurial/cmdutil.py # No attribute 'markcopied' on mercurial.context.filectx [attribute-error]
mercurial/context.py # many [attribute-error]
-mercurial/copies.py # No attribute 'items' on None [attribute-error]
mercurial/crecord.py # tons of [attribute-error], [module-attr]
mercurial/debugcommands.py # [wrong-arg-types]
mercurial/dispatch.py # initstdio: No attribute ... on TextIO [attribute-error]
mercurial/exchange.py # [attribute-error]
mercurial/hgweb/hgweb_mod.py # [attribute-error], [name-error], [wrong-arg-types]
mercurial/hgweb/server.py # [attribute-error], [name-error], [module-attr]
-mercurial/hgweb/webcommands.py # [missing-parameter]
mercurial/hgweb/wsgicgi.py # confused values in os.environ
mercurial/httppeer.py # [attribute-error], [wrong-arg-types]
mercurial/interfaces # No attribute 'capabilities' on peer [attribute-error]
mercurial/keepalive.py # [attribute-error]
mercurial/localrepo.py # [attribute-error]
-mercurial/lsprof.py # unguarded import
mercurial/manifest.py # [unsupported-operands], [wrong-arg-types]
mercurial/minirst.py # [unsupported-operands], [attribute-error]
-mercurial/patch.py # [wrong-arg-types]
mercurial/pure/osutil.py # [invalid-typevar], [not-callable]
mercurial/pure/parsers.py # [attribute-error]
-mercurial/pycompat.py # bytes vs str issues
mercurial/repoview.py # [attribute-error]
-mercurial/sslutil.py # [attribute-error]
-mercurial/statprof.py # bytes vs str on TextIO.write() [wrong-arg-types]
mercurial/testing/storage.py # tons of [attribute-error]
mercurial/ui.py # [attribute-error], [wrong-arg-types]
mercurial/unionrepo.py # ui, svfs, unfiltered [attribute-error]
-mercurial/upgrade.py # line 84, in upgraderepo: No attribute 'discard' on Dict[nothing, nothing] [attribute-error]
-mercurial/util.py # [attribute-error], [wrong-arg-count]
-mercurial/utils/procutil.py # [attribute-error], [module-attr], [bad-return-type]
-mercurial/utils/stringutil.py # [module-attr], [wrong-arg-count]
mercurial/utils/memorytop.py # not 3.6 compatible
mercurial/win32.py # [not-callable]
mercurial/wireprotoframing.py # [unsupported-operands], [attribute-error], [import-error]
-mercurial/wireprotoserver.py # line 253, in _availableapis: No attribute '__iter__' on Callable[[Any, Any], Any] [attribute-error]
mercurial/wireprotov1peer.py # [attribute-error]
mercurial/wireprotov1server.py # BUG?: BundleValueError handler accesses subclass's attrs
-mercurial/wireprotov2server.py # [unsupported-operands], [attribute-error]
TODO: use --no-cache on test server? Caching the files locally helps during
development, but may be a hinderance for CI testing.
$ pytype -V 3.6 --keep-going --jobs auto mercurial \
> -x mercurial/bundlerepo.py \
- > -x mercurial/changegroup.py \
- > -x mercurial/chgserver.py \
- > -x mercurial/cmdutil.py \
> -x mercurial/context.py \
- > -x mercurial/copies.py \
> -x mercurial/crecord.py \
> -x mercurial/debugcommands.py \
> -x mercurial/dispatch.py \
> -x mercurial/exchange.py \
> -x mercurial/hgweb/hgweb_mod.py \
> -x mercurial/hgweb/server.py \
- > -x mercurial/hgweb/webcommands.py \
> -x mercurial/hgweb/wsgicgi.py \
> -x mercurial/httppeer.py \
> -x mercurial/interfaces \
> -x mercurial/keepalive.py \
> -x mercurial/localrepo.py \
- > -x mercurial/lsprof.py \
> -x mercurial/manifest.py \
> -x mercurial/minirst.py \
- > -x mercurial/patch.py \
> -x mercurial/pure/osutil.py \
> -x mercurial/pure/parsers.py \
- > -x mercurial/pycompat.py \
> -x mercurial/repoview.py \
- > -x mercurial/sslutil.py \
- > -x mercurial/statprof.py \
> -x mercurial/testing/storage.py \
> -x mercurial/thirdparty \
> -x mercurial/ui.py \
> -x mercurial/unionrepo.py \
- > -x mercurial/upgrade.py \
- > -x mercurial/utils/procutil.py \
- > -x mercurial/utils/stringutil.py \
> -x mercurial/utils/memorytop.py \
> -x mercurial/win32.py \
> -x mercurial/wireprotoframing.py \
- > -x mercurial/wireprotoserver.py \
> -x mercurial/wireprotov1peer.py \
> -x mercurial/wireprotov1server.py \
- > -x mercurial/wireprotov2server.py \
> > $TESTTMP/pytype-output.txt || cat $TESTTMP/pytype-output.txt
Only show the results on a failure, because the output on success is also
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-clone-stream-format.t Fri Feb 18 14:27:43 2022 +0100
@@ -0,0 +1,435 @@
+This file contains test cases that deal with format changes across stream clones
+
+#require serve no-reposimplestore no-chg
+
+#testcases stream-legacy stream-bundle2
+
+ $ cat << EOF >> $HGRCPATH
+ > [storage]
+ > revlog.persistent-nodemap.slow-path=allow
+ > EOF
+
+#if stream-legacy
+ $ cat << EOF >> $HGRCPATH
+ > [server]
+ > bundle2.stream = no
+ > EOF
+#endif
+
+Initialize repository
+
+ $ hg init server --config format.use-share-safe=yes --config format.use-persistent-nodemap=yes
+ $ cd server
+ $ sh $TESTDIR/testlib/stream_clone_setup.sh
+ adding 00changelog-ab349180a0405010.nd
+ adding 00changelog.d
+ adding 00changelog.i
+ adding 00changelog.n
+ adding 00manifest.d
+ adding 00manifest.i
+ adding container/isam-build-centos7/bazel-coverage-generator-sandboxfs-compatibility-0758e3e4f6057904d44399bd666faba9e7f40686.patch
+ adding data/foo.d
+ adding data/foo.i
+ adding data/foo.n
+ adding data/undo.babar
+ adding data/undo.d
+ adding data/undo.foo.d
+ adding data/undo.foo.i
+ adding data/undo.foo.n
+ adding data/undo.i
+ adding data/undo.n
+ adding data/undo.py
+ adding foo.d
+ adding foo.i
+ adding foo.n
+ adding meta/foo.d
+ adding meta/foo.i
+ adding meta/foo.n
+ adding meta/undo.babar
+ adding meta/undo.d
+ adding meta/undo.foo.d
+ adding meta/undo.foo.i
+ adding meta/undo.foo.n
+ adding meta/undo.i
+ adding meta/undo.n
+ adding meta/undo.py
+ adding savanah/foo.d
+ adding savanah/foo.i
+ adding savanah/foo.n
+ adding savanah/undo.babar
+ adding savanah/undo.d
+ adding savanah/undo.foo.d
+ adding savanah/undo.foo.i
+ adding savanah/undo.foo.n
+ adding savanah/undo.i
+ adding savanah/undo.n
+ adding savanah/undo.py
+ adding store/C\xc3\xa9lesteVille_is_a_Capital_City (esc)
+ adding store/foo.d
+ adding store/foo.i
+ adding store/foo.n
+ adding store/undo.babar
+ adding store/undo.d
+ adding store/undo.foo.d
+ adding store/undo.foo.i
+ adding store/undo.foo.n
+ adding store/undo.i
+ adding store/undo.n
+ adding store/undo.py
+ adding undo.babar
+ adding undo.d
+ adding undo.foo.d
+ adding undo.foo.i
+ adding undo.foo.n
+ adding undo.i
+ adding undo.n
+ adding undo.py
+ $ hg debugbuilddag .+5000 --from-existing
+ $ ls -1 .hg/store/00changelog*
+ .hg/store/00changelog-*.nd (glob)
+ .hg/store/00changelog.d
+ .hg/store/00changelog.i
+ .hg/store/00changelog.n
+ $ cd ..
+
+
+Test streaming from/to repository without a store:
+==================================================
+
+ $ hg clone --pull --config format.usestore=no server server-no-store
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 5004 changesets with 1088 changes to 1088 files (+1 heads)
+ new changesets 96ee1d7354c4:06ddac466af5
+ updating to branch default
+ 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg verify -R server-no-store
+ checking changesets
+ checking manifests
+ crosschecking files in changesets and manifests
+ checking files
+ checked 5004 changesets with 1088 changes to 1088 files
+ $ hg -R server serve -p $HGPORT -d --pid-file=hg-1.pid --error errors-1.txt
+ $ cat hg-1.pid > $DAEMON_PIDS
+ $ hg -R server-no-store serve -p $HGPORT2 -d --pid-file=hg-2.pid --error errors-2.txt
+ $ cat hg-2.pid >> $DAEMON_PIDS
+ $ hg debugrequires -R server | grep store
+ store
+ $ hg debugrequires -R server-no-store | grep store
+ [1]
+
+store → no-store cloning
+
+ $ hg clone --quiet --stream -U http://localhost:$HGPORT clone-remove-store --config format.usestore=no
+ $ cat errors-1.txt
+ $ hg -R clone-remove-store verify
+ checking changesets
+ checking manifests
+ crosschecking files in changesets and manifests
+ checking files
+ checked 5004 changesets with 1088 changes to 1088 files
+ $ hg debugrequires -R clone-remove-store | grep store
+ [1]
+
+
+no-store → store cloning
+
+ $ hg clone --quiet --stream -U http://localhost:$HGPORT2 clone-add-store --config format.usestore=yes
+ $ cat errors-2.txt
+ $ hg -R clone-add-store verify
+ checking changesets
+ checking manifests
+ crosschecking files in changesets and manifests
+ checking files
+ checked 5004 changesets with 1088 changes to 1088 files
+ $ hg debugrequires -R clone-add-store | grep store
+ store
+
+
+ $ killdaemons.py
+
+
+Test streaming from/to repository without a fncache
+===================================================
+
+ $ rm hg-*.pid errors-*.txt
+ $ hg clone --pull --config format.usefncache=no server server-no-fncache
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 5004 changesets with 1088 changes to 1088 files (+1 heads)
+ new changesets 96ee1d7354c4:06ddac466af5
+ updating to branch default
+ 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg verify -R server-no-fncache
+ checking changesets
+ checking manifests
+ crosschecking files in changesets and manifests
+ checking files
+ checked 5004 changesets with 1088 changes to 1088 files
+ $ hg -R server serve -p $HGPORT -d --pid-file=hg-1.pid --error errors-1.txt
+ $ cat hg-1.pid > $DAEMON_PIDS
+ $ hg -R server-no-fncache serve -p $HGPORT2 -d --pid-file=hg-2.pid --error errors-2.txt
+ $ cat hg-2.pid >> $DAEMON_PIDS
+ $ hg debugrequires -R server | grep fncache
+ fncache
+ $ hg debugrequires -R server-no-fncache | grep fncache
+ [1]
+
+fncache → no-fncache cloning
+
+ $ hg clone --quiet --stream -U http://localhost:$HGPORT clone-remove-fncache --config format.usefncache=no
+ $ cat errors-1.txt
+ $ hg -R clone-remove-fncache verify
+ checking changesets
+ checking manifests
+ crosschecking files in changesets and manifests
+ checking files
+ checked 5004 changesets with 1088 changes to 1088 files
+ $ hg debugrequires -R clone-remove-fncache | grep fncache
+ [1]
+
+
+no-fncache → fncache cloning
+
+ $ hg clone --quiet --stream -U http://localhost:$HGPORT2 clone-add-fncache --config format.usefncache=yes
+ $ cat errors-2.txt
+ $ hg -R clone-add-fncache verify
+ checking changesets
+ checking manifests
+ crosschecking files in changesets and manifests
+ checking files
+ checked 5004 changesets with 1088 changes to 1088 files
+ $ hg debugrequires -R clone-add-fncache | grep fncache
+ fncache
+
+
+ $ killdaemons.py
+
+
+
+Test streaming from/to repository without a dotencode
+=====================================================
+
+ $ rm hg-*.pid errors-*.txt
+ $ hg clone --pull --config format.dotencode=no server server-no-dotencode
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 5004 changesets with 1088 changes to 1088 files (+1 heads)
+ new changesets 96ee1d7354c4:06ddac466af5
+ updating to branch default
+ 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg verify -R server-no-dotencode
+ checking changesets
+ checking manifests
+ crosschecking files in changesets and manifests
+ checking files
+ checked 5004 changesets with 1088 changes to 1088 files
+ $ hg -R server serve -p $HGPORT -d --pid-file=hg-1.pid --error errors-1.txt
+ $ cat hg-1.pid > $DAEMON_PIDS
+ $ hg -R server-no-dotencode serve -p $HGPORT2 -d --pid-file=hg-2.pid --error errors-2.txt
+ $ cat hg-2.pid >> $DAEMON_PIDS
+ $ hg debugrequires -R server | grep dotencode
+ dotencode
+ $ hg debugrequires -R server-no-dotencode | grep dotencode
+ [1]
+
+dotencode → no-dotencode cloning
+
+ $ hg clone --quiet --stream -U http://localhost:$HGPORT clone-remove-dotencode --config format.dotencode=no
+ $ cat errors-1.txt
+ $ hg -R clone-remove-dotencode verify
+ checking changesets
+ checking manifests
+ crosschecking files in changesets and manifests
+ checking files
+ checked 5004 changesets with 1088 changes to 1088 files
+ $ hg debugrequires -R clone-remove-dotencode | grep dotencode
+ [1]
+
+
+no-dotencode → dotencode cloning
+
+ $ hg clone --quiet --stream -U http://localhost:$HGPORT2 clone-add-dotencode --config format.dotencode=yes
+ $ cat errors-2.txt
+ $ hg -R clone-add-dotencode verify
+ checking changesets
+ checking manifests
+ crosschecking files in changesets and manifests
+ checking files
+ checked 5004 changesets with 1088 changes to 1088 files
+ $ hg debugrequires -R clone-add-dotencode | grep dotencode
+ dotencode
+
+
+ $ killdaemons.py
+
+Cloning from a share
+--------------------
+
+We should be able to clone from a "share" repository, it will use the source store for streaming.
+
+The resulting clone should not use share.
+
+ $ rm hg-*.pid errors-*.txt
+ $ hg share --config extensions.share= server server-share -U
+ $ hg -R server-share serve -p $HGPORT -d --pid-file=hg-1.pid --error errors-1.txt
+ $ cat hg-1.pid > $DAEMON_PIDS
+
+ $ hg clone --quiet --stream -U http://localhost:$HGPORT clone-from-share
+ $ hg -R clone-from-share verify
+ checking changesets
+ checking manifests
+ crosschecking files in changesets and manifests
+ checking files
+ checked 5004 changesets with 1088 changes to 1088 files
+ $ hg debugrequires -R clone-from-share | egrep 'share$'
+ [1]
+
+ $ killdaemons.py
+
+Test streaming from/to repository without a share-safe
+======================================================
+
+ $ rm hg-*.pid errors-*.txt
+ $ hg clone --pull --config format.use-share-safe=no server server-no-share-safe
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 5004 changesets with 1088 changes to 1088 files (+1 heads)
+ new changesets 96ee1d7354c4:06ddac466af5
+ updating to branch default
+ 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg verify -R server-no-share-safe
+ checking changesets
+ checking manifests
+ crosschecking files in changesets and manifests
+ checking files
+ checked 5004 changesets with 1088 changes to 1088 files
+ $ hg -R server serve -p $HGPORT -d --pid-file=hg-1.pid --error errors-1.txt
+ $ cat hg-1.pid > $DAEMON_PIDS
+ $ hg -R server-no-share-safe serve -p $HGPORT2 -d --pid-file=hg-2.pid --error errors-2.txt
+ $ cat hg-2.pid >> $DAEMON_PIDS
+ $ hg debugrequires -R server | grep share-safe
+ share-safe
+ $ hg debugrequires -R server-no-share-safe | grep share-safe
+ [1]
+
+share-safe → no-share-safe cloning
+
+ $ hg clone --quiet --stream -U http://localhost:$HGPORT clone-remove-share-safe --config format.use-share-safe=no
+ $ cat errors-1.txt
+ $ hg -R clone-remove-share-safe verify
+ checking changesets
+ checking manifests
+ crosschecking files in changesets and manifests
+ checking files
+ checked 5004 changesets with 1088 changes to 1088 files
+ $ hg debugrequires -R clone-remove-share-safe | grep share-safe
+ [1]
+
+
+no-share-safe → share-safe cloning
+
+ $ hg clone --quiet --stream -U http://localhost:$HGPORT2 clone-add-share-safe --config format.use-share-safe=yes
+ $ cat errors-2.txt
+ $ hg -R clone-add-share-safe verify
+ checking changesets
+ checking manifests
+ crosschecking files in changesets and manifests
+ checking files
+ checked 5004 changesets with 1088 changes to 1088 files
+ $ hg debugrequires -R clone-add-share-safe | grep share-safe
+ share-safe
+
+
+ $ killdaemons.py
+
+
+Test streaming from/to repository without a persistent-nodemap
+==============================================================
+
+persistent nodemap affects revlog, but they are easy to generate locally, so we allow it to be changed over a stream clone
+
+ $ rm hg-*.pid errors-*.txt
+ $ hg clone --pull --config format.use-persistent-nodemap=no server server-no-persistent-nodemap
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 5004 changesets with 1088 changes to 1088 files (+1 heads)
+ new changesets 96ee1d7354c4:06ddac466af5
+ updating to branch default
+ 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg verify -R server-no-persistent-nodemap
+ checking changesets
+ checking manifests
+ crosschecking files in changesets and manifests
+ checking files
+ checked 5004 changesets with 1088 changes to 1088 files
+ $ hg -R server serve -p $HGPORT -d --pid-file=hg-1.pid --error errors-1.txt
+ $ cat hg-1.pid > $DAEMON_PIDS
+ $ hg -R server-no-persistent-nodemap serve -p $HGPORT2 -d --pid-file=hg-2.pid --error errors-2.txt
+ $ cat hg-2.pid >> $DAEMON_PIDS
+ $ hg debugrequires -R server | grep persistent-nodemap
+ persistent-nodemap
+ $ hg debugrequires -R server-no-persistent-nodemap | grep persistent-nodemap
+ [1]
+ $ ls -1 server/.hg/store/00changelog*
+ server/.hg/store/00changelog-*.nd (glob)
+ server/.hg/store/00changelog.d
+ server/.hg/store/00changelog.i
+ server/.hg/store/00changelog.n
+ $ ls -1 server-no-persistent-nodemap/.hg/store/00changelog*
+ server-no-persistent-nodemap/.hg/store/00changelog.d
+ server-no-persistent-nodemap/.hg/store/00changelog.i
+
+persistent-nodemap → no-persistent-nodemap cloning
+
+ $ hg clone --quiet --stream -U http://localhost:$HGPORT clone-remove-persistent-nodemap --config format.use-persistent-nodemap=no
+ $ cat errors-1.txt
+ $ hg -R clone-remove-persistent-nodemap verify
+ checking changesets
+ checking manifests
+ crosschecking files in changesets and manifests
+ checking files
+ checked 5004 changesets with 1088 changes to 1088 files
+ $ hg debugrequires -R clone-remove-persistent-nodemap | grep persistent-nodemap
+ [1]
+
+The persistent-nodemap files should no longer exist
+
+ $ ls -1 clone-remove-persistent-nodemap/.hg/store/00changelog*
+ clone-remove-persistent-nodemap/.hg/store/00changelog.d
+ clone-remove-persistent-nodemap/.hg/store/00changelog.i
+
+
+no-persistent-nodemap → persistent-nodemap cloning
+
+ $ hg clone --quiet --stream -U http://localhost:$HGPORT2 clone-add-persistent-nodemap --config format.use-persistent-nodemap=yes
+ $ cat errors-2.txt
+ $ hg -R clone-add-persistent-nodemap verify
+ checking changesets
+ checking manifests
+ crosschecking files in changesets and manifests
+ checking files
+ checked 5004 changesets with 1088 changes to 1088 files
+ $ hg debugrequires -R clone-add-persistent-nodemap | grep persistent-nodemap
+ persistent-nodemap
+
+The persistent-nodemap files should exist
+
+ $ ls -1 clone-add-persistent-nodemap/.hg/store/00changelog*
+ clone-add-persistent-nodemap/.hg/store/00changelog-*.nd (glob)
+ clone-add-persistent-nodemap/.hg/store/00changelog.d
+ clone-add-persistent-nodemap/.hg/store/00changelog.i
+ clone-add-persistent-nodemap/.hg/store/00changelog.n
+
+
+ $ killdaemons.py
--- a/tests/test-clone-stream.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-clone-stream.t Fri Feb 18 14:27:43 2022 +0100
@@ -10,98 +10,10 @@
#endif
Initialize repository
-the status call is to check for issue5130
$ hg init server
$ cd server
- $ touch foo
- $ hg -q commit -A -m initial
- >>> for i in range(1024):
- ... with open(str(i), 'wb') as fh:
- ... fh.write(b"%d" % i) and None
- $ hg -q commit -A -m 'add a lot of files'
- $ hg st
-
-add files with "tricky" name:
-
- $ echo foo > 00changelog.i
- $ echo foo > 00changelog.d
- $ echo foo > 00changelog.n
- $ echo foo > 00changelog-ab349180a0405010.nd
- $ echo foo > 00manifest.i
- $ echo foo > 00manifest.d
- $ echo foo > foo.i
- $ echo foo > foo.d
- $ echo foo > foo.n
- $ echo foo > undo.py
- $ echo foo > undo.i
- $ echo foo > undo.d
- $ echo foo > undo.n
- $ echo foo > undo.foo.i
- $ echo foo > undo.foo.d
- $ echo foo > undo.foo.n
- $ echo foo > undo.babar
- $ mkdir savanah
- $ echo foo > savanah/foo.i
- $ echo foo > savanah/foo.d
- $ echo foo > savanah/foo.n
- $ echo foo > savanah/undo.py
- $ echo foo > savanah/undo.i
- $ echo foo > savanah/undo.d
- $ echo foo > savanah/undo.n
- $ echo foo > savanah/undo.foo.i
- $ echo foo > savanah/undo.foo.d
- $ echo foo > savanah/undo.foo.n
- $ echo foo > savanah/undo.babar
- $ mkdir data
- $ echo foo > data/foo.i
- $ echo foo > data/foo.d
- $ echo foo > data/foo.n
- $ echo foo > data/undo.py
- $ echo foo > data/undo.i
- $ echo foo > data/undo.d
- $ echo foo > data/undo.n
- $ echo foo > data/undo.foo.i
- $ echo foo > data/undo.foo.d
- $ echo foo > data/undo.foo.n
- $ echo foo > data/undo.babar
- $ mkdir meta
- $ echo foo > meta/foo.i
- $ echo foo > meta/foo.d
- $ echo foo > meta/foo.n
- $ echo foo > meta/undo.py
- $ echo foo > meta/undo.i
- $ echo foo > meta/undo.d
- $ echo foo > meta/undo.n
- $ echo foo > meta/undo.foo.i
- $ echo foo > meta/undo.foo.d
- $ echo foo > meta/undo.foo.n
- $ echo foo > meta/undo.babar
- $ mkdir store
- $ echo foo > store/foo.i
- $ echo foo > store/foo.d
- $ echo foo > store/foo.n
- $ echo foo > store/undo.py
- $ echo foo > store/undo.i
- $ echo foo > store/undo.d
- $ echo foo > store/undo.n
- $ echo foo > store/undo.foo.i
- $ echo foo > store/undo.foo.d
- $ echo foo > store/undo.foo.n
- $ echo foo > store/undo.babar
-
-Name with special characters
-
- $ echo foo > store/CélesteVille_is_a_Capital_City
-
-name causing issue6581
-
- $ mkdir -p container/isam-build-centos7/
- $ touch container/isam-build-centos7/bazel-coverage-generator-sandboxfs-compatibility-0758e3e4f6057904d44399bd666faba9e7f40686.patch
-
-Add all that
-
- $ hg add .
+ $ sh $TESTDIR/testlib/stream_clone_setup.sh
adding 00changelog-ab349180a0405010.nd
adding 00changelog.d
adding 00changelog.i
@@ -165,7 +77,7 @@
adding undo.i
adding undo.n
adding undo.py
- $ hg ci -m 'add files with "tricky" name'
+
$ hg --config server.uncompressed=false serve -p $HGPORT -d --pid-file=hg.pid
$ cat hg.pid > $DAEMON_PIDS
$ cd ..
@@ -384,65 +296,65 @@
#if no-zstd no-rust
$ f --size --hex --bytes 256 body
- body: size=119153
+ body: size=119123
0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
- 0010: 80 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......|
- 0020: 06 09 04 0c 44 62 79 74 65 63 6f 75 6e 74 31 30 |....Dbytecount10|
+ 0010: 62 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |b.STREAM2.......|
+ 0020: 06 09 04 0c 26 62 79 74 65 63 6f 75 6e 74 31 30 |....&bytecount10|
0030: 34 31 31 35 66 69 6c 65 63 6f 75 6e 74 31 30 39 |4115filecount109|
- 0040: 33 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 |3requirementsdot|
- 0050: 65 6e 63 6f 64 65 25 32 43 66 6e 63 61 63 68 65 |encode%2Cfncache|
- 0060: 25 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 25 |%2Cgeneraldelta%|
- 0070: 32 43 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 |2Crevlogv1%2Cspa|
- 0080: 72 73 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 |rserevlog%2Cstor|
- 0090: 65 00 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 |e....s.Bdata/0.i|
- 00a0: 00 03 00 01 00 00 00 00 00 00 00 02 00 00 00 01 |................|
- 00b0: 00 00 00 00 00 00 00 01 ff ff ff ff ff ff ff ff |................|
- 00c0: 80 29 63 a0 49 d3 23 87 bf ce fe 56 67 92 67 2c |.)c.I.#....Vg.g,|
- 00d0: 69 d1 ec 39 00 00 00 00 00 00 00 00 00 00 00 00 |i..9............|
- 00e0: 75 30 73 26 45 64 61 74 61 2f 30 30 63 68 61 6e |u0s&Edata/00chan|
- 00f0: 67 65 6c 6f 67 2d 61 62 33 34 39 31 38 30 61 30 |gelog-ab349180a0|
+ 0040: 33 72 65 71 75 69 72 65 6d 65 6e 74 73 67 65 6e |3requirementsgen|
+ 0050: 65 72 61 6c 64 65 6c 74 61 25 32 43 72 65 76 6c |eraldelta%2Crevl|
+ 0060: 6f 67 76 31 25 32 43 73 70 61 72 73 65 72 65 76 |ogv1%2Csparserev|
+ 0070: 6c 6f 67 00 00 80 00 73 08 42 64 61 74 61 2f 30 |log....s.Bdata/0|
+ 0080: 2e 69 00 03 00 01 00 00 00 00 00 00 00 02 00 00 |.i..............|
+ 0090: 00 01 00 00 00 00 00 00 00 01 ff ff ff ff ff ff |................|
+ 00a0: ff ff 80 29 63 a0 49 d3 23 87 bf ce fe 56 67 92 |...)c.I.#....Vg.|
+ 00b0: 67 2c 69 d1 ec 39 00 00 00 00 00 00 00 00 00 00 |g,i..9..........|
+ 00c0: 00 00 75 30 73 26 45 64 61 74 61 2f 30 30 63 68 |..u0s&Edata/00ch|
+ 00d0: 61 6e 67 65 6c 6f 67 2d 61 62 33 34 39 31 38 30 |angelog-ab349180|
+ 00e0: 61 30 34 30 35 30 31 30 2e 6e 64 2e 69 00 03 00 |a0405010.nd.i...|
+ 00f0: 01 00 00 00 00 00 00 00 05 00 00 00 04 00 00 00 |................|
#endif
#if zstd no-rust
$ f --size --hex --bytes 256 body
- body: size=116340 (no-bigendian !)
+ body: size=116310
body: size=116335 (bigendian !)
0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
- 0010: 9a 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......|
- 0020: 06 09 04 0c 5e 62 79 74 65 63 6f 75 6e 74 31 30 |....^bytecount10|
+ 0010: 7c 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 ||.STREAM2.......|
+ 0020: 06 09 04 0c 40 62 79 74 65 63 6f 75 6e 74 31 30 |....@bytecount10|
0030: 31 32 37 36 66 69 6c 65 63 6f 75 6e 74 31 30 39 |1276filecount109| (no-bigendian !)
0030: 31 32 37 31 66 69 6c 65 63 6f 75 6e 74 31 30 39 |1271filecount109| (bigendian !)
- 0040: 33 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 |3requirementsdot|
- 0050: 65 6e 63 6f 64 65 25 32 43 66 6e 63 61 63 68 65 |encode%2Cfncache|
- 0060: 25 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 25 |%2Cgeneraldelta%|
- 0070: 32 43 72 65 76 6c 6f 67 2d 63 6f 6d 70 72 65 73 |2Crevlog-compres|
- 0080: 73 69 6f 6e 2d 7a 73 74 64 25 32 43 72 65 76 6c |sion-zstd%2Crevl|
- 0090: 6f 67 76 31 25 32 43 73 70 61 72 73 65 72 65 76 |ogv1%2Csparserev|
- 00a0: 6c 6f 67 25 32 43 73 74 6f 72 65 00 00 80 00 73 |log%2Cstore....s|
- 00b0: 08 42 64 61 74 61 2f 30 2e 69 00 03 00 01 00 00 |.Bdata/0.i......|
- 00c0: 00 00 00 00 00 02 00 00 00 01 00 00 00 00 00 00 |................|
- 00d0: 00 01 ff ff ff ff ff ff ff ff 80 29 63 a0 49 d3 |...........)c.I.|
- 00e0: 23 87 bf ce fe 56 67 92 67 2c 69 d1 ec 39 00 00 |#....Vg.g,i..9..|
- 00f0: 00 00 00 00 00 00 00 00 00 00 75 30 73 26 45 64 |..........u0s&Ed|
+ 0040: 33 72 65 71 75 69 72 65 6d 65 6e 74 73 67 65 6e |3requirementsgen|
+ 0050: 65 72 61 6c 64 65 6c 74 61 25 32 43 72 65 76 6c |eraldelta%2Crevl|
+ 0060: 6f 67 2d 63 6f 6d 70 72 65 73 73 69 6f 6e 2d 7a |og-compression-z|
+ 0070: 73 74 64 25 32 43 72 65 76 6c 6f 67 76 31 25 32 |std%2Crevlogv1%2|
+ 0080: 43 73 70 61 72 73 65 72 65 76 6c 6f 67 00 00 80 |Csparserevlog...|
+ 0090: 00 73 08 42 64 61 74 61 2f 30 2e 69 00 03 00 01 |.s.Bdata/0.i....|
+ 00a0: 00 00 00 00 00 00 00 02 00 00 00 01 00 00 00 00 |................|
+ 00b0: 00 00 00 01 ff ff ff ff ff ff ff ff 80 29 63 a0 |.............)c.|
+ 00c0: 49 d3 23 87 bf ce fe 56 67 92 67 2c 69 d1 ec 39 |I.#....Vg.g,i..9|
+ 00d0: 00 00 00 00 00 00 00 00 00 00 00 00 75 30 73 26 |............u0s&|
+ 00e0: 45 64 61 74 61 2f 30 30 63 68 61 6e 67 65 6c 6f |Edata/00changelo|
+ 00f0: 67 2d 61 62 33 34 39 31 38 30 61 30 34 30 35 30 |g-ab349180a04050|
#endif
#if zstd rust no-dirstate-v2
$ f --size --hex --bytes 256 body
- body: size=116361
+ body: size=116310
0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
- 0010: af 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......|
- 0020: 06 09 04 0c 73 62 79 74 65 63 6f 75 6e 74 31 30 |....sbytecount10|
+ 0010: 7c 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 ||.STREAM2.......|
+ 0020: 06 09 04 0c 40 62 79 74 65 63 6f 75 6e 74 31 30 |....@bytecount10|
0030: 31 32 37 36 66 69 6c 65 63 6f 75 6e 74 31 30 39 |1276filecount109|
- 0040: 33 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 |3requirementsdot|
- 0050: 65 6e 63 6f 64 65 25 32 43 66 6e 63 61 63 68 65 |encode%2Cfncache|
- 0060: 25 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 25 |%2Cgeneraldelta%|
- 0070: 32 43 70 65 72 73 69 73 74 65 6e 74 2d 6e 6f 64 |2Cpersistent-nod|
- 0080: 65 6d 61 70 25 32 43 72 65 76 6c 6f 67 2d 63 6f |emap%2Crevlog-co|
- 0090: 6d 70 72 65 73 73 69 6f 6e 2d 7a 73 74 64 25 32 |mpression-zstd%2|
- 00a0: 43 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 72 |Crevlogv1%2Cspar|
- 00b0: 73 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 65 |serevlog%2Cstore|
- 00c0: 00 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 00 |....s.Bdata/0.i.|
- 00d0: 03 00 01 00 00 00 00 00 00 00 02 00 00 00 01 00 |................|
- 00e0: 00 00 00 00 00 00 01 ff ff ff ff ff ff ff ff 80 |................|
- 00f0: 29 63 a0 49 d3 23 87 bf ce fe 56 67 92 67 2c 69 |)c.I.#....Vg.g,i|
+ 0040: 33 72 65 71 75 69 72 65 6d 65 6e 74 73 67 65 6e |3requirementsgen|
+ 0050: 65 72 61 6c 64 65 6c 74 61 25 32 43 72 65 76 6c |eraldelta%2Crevl|
+ 0060: 6f 67 2d 63 6f 6d 70 72 65 73 73 69 6f 6e 2d 7a |og-compression-z|
+ 0070: 73 74 64 25 32 43 72 65 76 6c 6f 67 76 31 25 32 |std%2Crevlogv1%2|
+ 0080: 43 73 70 61 72 73 65 72 65 76 6c 6f 67 00 00 80 |Csparserevlog...|
+ 0090: 00 73 08 42 64 61 74 61 2f 30 2e 69 00 03 00 01 |.s.Bdata/0.i....|
+ 00a0: 00 00 00 00 00 00 00 02 00 00 00 01 00 00 00 00 |................|
+ 00b0: 00 00 00 01 ff ff ff ff ff ff ff ff 80 29 63 a0 |.............)c.|
+ 00c0: 49 d3 23 87 bf ce fe 56 67 92 67 2c 69 d1 ec 39 |I.#....Vg.g,i..9|
+ 00d0: 00 00 00 00 00 00 00 00 00 00 00 00 75 30 73 26 |............u0s&|
+ 00e0: 45 64 61 74 61 2f 30 30 63 68 61 6e 67 65 6c 6f |Edata/00changelo|
+ 00f0: 67 2d 61 62 33 34 39 31 38 30 61 30 34 30 35 30 |g-ab349180a04050|
#endif
#if zstd dirstate-v2
$ f --size --hex --bytes 256 body
--- a/tests/test-clone.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-clone.t Fri Feb 18 14:27:43 2022 +0100
@@ -1,13 +1,3 @@
-#testcases sshv1 sshv2
-
-#if sshv2
- $ cat >> $HGRCPATH << EOF
- > [experimental]
- > sshpeer.advertise-v2 = true
- > sshserver.support-v2 = true
- > EOF
-#endif
-
Prepare repo a:
$ hg init a
@@ -1206,14 +1196,12 @@
#if windows
$ hg clone "ssh://%26touch%20owned%20/" --debug
running sh -c "read l; read l; read l" "&touch owned " "hg -R . serve --stdio"
- sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
sending hello command
sending between command
abort: no suitable response from remote hg
[255]
$ hg clone "ssh://example.com:%26touch%20owned%20/" --debug
running sh -c "read l; read l; read l" -p "&touch owned " example.com "hg -R . serve --stdio"
- sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
sending hello command
sending between command
abort: no suitable response from remote hg
@@ -1221,14 +1209,12 @@
#else
$ hg clone "ssh://%3btouch%20owned%20/" --debug
running sh -c "read l; read l; read l" ';touch owned ' 'hg -R . serve --stdio'
- sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
sending hello command
sending between command
abort: no suitable response from remote hg
[255]
$ hg clone "ssh://example.com:%3btouch%20owned%20/" --debug
running sh -c "read l; read l; read l" -p ';touch owned ' example.com 'hg -R . serve --stdio'
- sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
sending hello command
sending between command
abort: no suitable response from remote hg
@@ -1237,7 +1223,6 @@
$ hg clone "ssh://v-alid.example.com/" --debug
running sh -c "read l; read l; read l" v-alid\.example\.com ['"]hg -R \. serve --stdio['"] (re)
- sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
sending hello command
sending between command
abort: no suitable response from remote hg
--- a/tests/test-clonebundles.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-clonebundles.t Fri Feb 18 14:27:43 2022 +0100
@@ -279,8 +279,9 @@
$ hg -R server debugcreatestreamclonebundle packed.hg
writing 613 bytes for 4 files
- bundle requirements: generaldelta, revlogv1, sparserevlog (no-rust !)
- bundle requirements: generaldelta, persistent-nodemap, revlogv1, sparserevlog (rust !)
+ bundle requirements: generaldelta, revlogv1, sparserevlog (no-rust no-zstd !)
+ bundle requirements: generaldelta, revlog-compression-zstd, revlogv1, sparserevlog (no-rust zstd !)
+ bundle requirements: generaldelta, revlog-compression-zstd, revlogv1, sparserevlog (rust !)
No bundle spec should work
--- a/tests/test-commandserver.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-commandserver.t Fri Feb 18 14:27:43 2022 +0100
@@ -159,7 +159,7 @@
... b'default'])
*** runcommand log -b --config=alias.log=!echo pwned default
abort: unknown revision '--config=alias.log=!echo pwned'
- [255]
+ [10]
check that "histedit --commands=-" can read rules from the input channel:
--- a/tests/test-commit-interactive.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-commit-interactive.t Fri Feb 18 14:27:43 2022 +0100
@@ -1494,7 +1494,7 @@
Hunk #1 FAILED at 0
1 out of 1 hunks FAILED -- saving rejects to file editedfile.rej
abort: patch failed to apply
- [10]
+ [20]
$ cat editedfile
This change will not be committed
This is the second line
--- a/tests/test-commit.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-commit.t Fri Feb 18 14:27:43 2022 +0100
@@ -134,13 +134,13 @@
$ hg add quux
$ hg commit -m "adding internal used extras" --extra amend_source=hash
abort: key 'amend_source' is used internally, can't be set manually
- [255]
+ [10]
$ hg commit -m "special chars in extra" --extra id@phab=214
abort: keys can only contain ascii letters, digits, '_' and '-'
- [255]
+ [10]
$ hg commit -m "empty key" --extra =value
abort: unable to parse '=value', keys can't be empty
- [255]
+ [10]
$ hg commit -m "adding extras" --extra sourcehash=foo --extra oldhash=bar
$ hg log -r . -T '{extras % "{extra}\n"}'
branch=default
@@ -661,11 +661,11 @@
#if windows
$ hg co --clean tip
abort: path contains illegal component: .h\xe2\x80\x8cg\\hgrc (esc)
- [255]
+ [10]
#else
$ hg co --clean tip
abort: path contains illegal component: .h\xe2\x80\x8cg/hgrc (esc)
- [255]
+ [10]
#endif
$ hg rollback -f
@@ -686,7 +686,7 @@
$ "$PYTHON" evil-commit.py
$ hg co --clean tip
abort: path contains illegal component: HG~1/hgrc
- [255]
+ [10]
$ hg rollback -f
repository tip rolled back to revision 2 (undo commit)
@@ -706,7 +706,7 @@
$ "$PYTHON" evil-commit.py
$ hg co --clean tip
abort: path contains illegal component: HG8B6C~2/hgrc
- [255]
+ [10]
$ cd ..
--- a/tests/test-completion.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-completion.t Fri Feb 18 14:27:43 2022 +0100
@@ -272,7 +272,7 @@
debugantivirusrunning:
debugapplystreamclonebundle:
debugbackupbundle: recover, patch, git, limit, no-merges, stat, graph, style, template
- debugbuilddag: mergeable-file, overwritten-file, new-file
+ debugbuilddag: mergeable-file, overwritten-file, new-file, from-existing
debugbundle: all, part-type, spec
debugcapabilities:
debugchangedfiles: compute
--- a/tests/test-conflict.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-conflict.t Fri Feb 18 14:27:43 2022 +0100
@@ -267,7 +267,7 @@
3
6
8
- ||||||| base
+ ||||||| common ancestor
One
Two
Three
@@ -308,7 +308,7 @@
2
3
<<<<<<<
- ------- base
+ ------- working copy parent
+++++++ working copy
4
+4.5
@@ -346,7 +346,7 @@
3.5
4.5
5.5
- ------- base
+ ------- working copy parent
+++++++ destination
3
-4
@@ -404,7 +404,7 @@
1 other heads for branch "default"
$ hg merge --tool :merge-local
merging a
- 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ 1 files updated, 1 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)
$ cat a
Start of file
--- a/tests/test-copies-chain-merge.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-copies-chain-merge.t Fri Feb 18 14:27:43 2022 +0100
@@ -463,14 +463,14 @@
3 files updated, 0 files merged, 2 files removed, 0 files unresolved
$ hg merge 'desc("q-2")' --tool ':union'
merging v
- 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ 0 files updated, 1 files merged, 1 files removed, 0 files unresolved
(branch merge, don't forget to commit)
$ hg ci -m "mPQm-0 $case_desc - one way"
$ hg up 'desc("q-2")'
2 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg merge 'desc("p-2")' --tool ':union'
merging v
- 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ 0 files updated, 1 files merged, 1 files removed, 0 files unresolved
(branch merge, don't forget to commit)
$ hg ci -m "mQPm-0 $case_desc - the other way"
created new head
@@ -626,14 +626,14 @@
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg merge 'desc("g-1")' --tool :union
merging d
- 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)
$ hg ci -m "mDGm-0 $case_desc - one way"
$ hg up 'desc("g-1")'
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg merge 'desc("d-2")' --tool :union
merging d
- 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)
$ hg ci -m "mGDm-0 $case_desc - the other way"
created new head
@@ -1649,22 +1649,10 @@
> [format]
> exp-use-copies-side-data-changeset = yes
> EOF
- $ hg debugformat -v
- format-variant repo config default
- fncache: yes yes yes
- dirstate-v2: no no no
- dotencode: yes yes yes
- generaldelta: yes yes yes
- share-safe: no no no
- sparserevlog: yes yes yes
- persistent-nodemap: no no no (no-rust !)
- persistent-nodemap: yes yes no (rust !)
+ $ hg debugformat -v | egrep 'changelog-v2|revlog-v2|copies-sdc'
copies-sdc: no yes no
revlog-v2: no no no
changelog-v2: no yes no
- plain-cl-delta: yes yes yes
- compression: * (glob)
- compression-level: default default default
$ hg debugupgraderepo --run --quiet
upgrade will perform the following actions:
@@ -1689,22 +1677,10 @@
> enabled=yes
> numcpus=8
> EOF
- $ hg debugformat -v
- format-variant repo config default
- fncache: yes yes yes
- dirstate-v2: no no no
- dotencode: yes yes yes
- generaldelta: yes yes yes
- share-safe: no no no
- sparserevlog: yes yes yes
- persistent-nodemap: no no no (no-rust !)
- persistent-nodemap: yes yes no (rust !)
+ $ hg debugformat -v | egrep 'changelog-v2|revlog-v2|copies-sdc'
copies-sdc: no yes no
revlog-v2: no no no
changelog-v2: no yes no
- plain-cl-delta: yes yes yes
- compression: * (glob)
- compression-level: default default default
$ hg debugupgraderepo --run --quiet
upgrade will perform the following actions:
--- a/tests/test-copies-in-changeset.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-copies-in-changeset.t Fri Feb 18 14:27:43 2022 +0100
@@ -32,41 +32,17 @@
$ hg init repo
$ cd repo
#if sidedata
- $ hg debugformat -v
+ $ hg debugformat -v | egrep 'format-variant|revlog-v2|copies-sdc|changelog-v2'
format-variant repo config default
- fncache: yes yes yes
- dirstate-v2: no no no
- dotencode: yes yes yes
- generaldelta: yes yes yes
- share-safe: no no no
- sparserevlog: yes yes yes
- persistent-nodemap: no no no (no-rust !)
- persistent-nodemap: yes yes no (rust !)
copies-sdc: yes yes no
revlog-v2: no no no
changelog-v2: yes yes no
- plain-cl-delta: yes yes yes
- compression: zlib zlib zlib (no-zstd !)
- compression: zstd zstd zstd (zstd !)
- compression-level: default default default
#else
- $ hg debugformat -v
+ $ hg debugformat -v | egrep 'format-variant|revlog-v2|copies-sdc|changelog-v2'
format-variant repo config default
- fncache: yes yes yes
- dirstate-v2: no no no
- dotencode: yes yes yes
- generaldelta: yes yes yes
- share-safe: no no no
- sparserevlog: yes yes yes
- persistent-nodemap: no no no (no-rust !)
- persistent-nodemap: yes yes no (rust !)
copies-sdc: no no no
revlog-v2: no no no
changelog-v2: no no no
- plain-cl-delta: yes yes yes
- compression: zlib zlib zlib (no-zstd !)
- compression: zstd zstd zstd (zstd !)
- compression-level: default default default
#endif
$ echo a > a
$ hg add a
@@ -425,23 +401,11 @@
downgrading
- $ hg debugformat -v
+ $ hg debugformat -v | egrep 'format-variant|revlog-v2|copies-sdc|changelog-v2'
format-variant repo config default
- fncache: yes yes yes
- dirstate-v2: no no no
- dotencode: yes yes yes
- generaldelta: yes yes yes
- share-safe: no no no
- sparserevlog: yes yes yes
- persistent-nodemap: no no no (no-rust !)
- persistent-nodemap: yes yes no (rust !)
copies-sdc: yes yes no
revlog-v2: no no no
changelog-v2: yes yes no
- plain-cl-delta: yes yes yes
- compression: zlib zlib zlib (no-zstd !)
- compression: zstd zstd zstd (zstd !)
- compression-level: default default default
$ hg debugsidedata -c -- 0
1 sidedata entries
entry-0014 size 14
@@ -456,23 +420,11 @@
> revlogv2 = enable-unstable-format-and-corrupt-my-data
> EOF
$ hg debugupgraderepo --run --quiet --no-backup > /dev/null
- $ hg debugformat -v
+ $ hg debugformat -v | egrep 'format-variant|revlog-v2|copies-sdc|changelog-v2'
format-variant repo config default
- fncache: yes yes yes
- dirstate-v2: no no no
- dotencode: yes yes yes
- generaldelta: yes yes yes
- share-safe: no no no
- sparserevlog: yes yes yes
- persistent-nodemap: no no no (no-rust !)
- persistent-nodemap: yes yes no (rust !)
copies-sdc: no no no
revlog-v2: yes yes no
changelog-v2: no no no
- plain-cl-delta: yes yes yes
- compression: zlib zlib zlib (no-zstd !)
- compression: zstd zstd zstd (zstd !)
- compression-level: default default default
$ hg debugsidedata -c -- 0
$ hg debugsidedata -c -- 1
$ hg debugsidedata -m -- 0
@@ -484,23 +436,11 @@
> exp-use-copies-side-data-changeset = yes
> EOF
$ hg debugupgraderepo --run --quiet --no-backup > /dev/null
- $ hg debugformat -v
+ $ hg debugformat -v | egrep 'format-variant|revlog-v2|copies-sdc|changelog-v2'
format-variant repo config default
- fncache: yes yes yes
- dirstate-v2: no no no
- dotencode: yes yes yes
- generaldelta: yes yes yes
- share-safe: no no no
- sparserevlog: yes yes yes
- persistent-nodemap: no no no (no-rust !)
- persistent-nodemap: yes yes no (rust !)
copies-sdc: yes yes no
revlog-v2: no no no
changelog-v2: yes yes no
- plain-cl-delta: yes yes yes
- compression: zlib zlib zlib (no-zstd !)
- compression: zstd zstd zstd (zstd !)
- compression-level: default default default
$ hg debugsidedata -c -- 0
1 sidedata entries
entry-0014 size 14
--- a/tests/test-copy-move-merge.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-copy-move-merge.t Fri Feb 18 14:27:43 2022 +0100
@@ -104,12 +104,12 @@
preserving a for resolve of b
preserving a for resolve of c
removing a
- b: remote moved from a -> m (premerge)
+ b: remote moved from a -> m
picked tool ':merge' for b (binary False symlink False changedelete False)
merging a and b to b
my b@add3f11052fa+ other b@17c05bb7fcb6 ancestor a@b8bf91eeebbc
premerge successful
- c: remote moved from a -> m (premerge)
+ c: remote moved from a -> m
picked tool ':merge' for c (binary False symlink False changedelete False)
merging a and c to c
my c@add3f11052fa+ other c@17c05bb7fcb6 ancestor a@b8bf91eeebbc
--- a/tests/test-debugcommands.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-debugcommands.t Fri Feb 18 14:27:43 2022 +0100
@@ -649,16 +649,16 @@
local: no
pushable: yes
+#if rust
+
$ hg --debug debugpeer ssh://user@dummy/debugrevlog
running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R debugrevlog serve --stdio['"] (re)
devel-peer-request: hello+between
devel-peer-request: pairs: 81 bytes
sending hello command
sending between command
- remote: 444 (no-rust !)
- remote: 463 (rust !)
- remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (no-rust !)
- remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,persistent-nodemap,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (rust !)
+ remote: 468
+ remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlog-compression-zstd,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
remote: 1
devel-peer-request: protocaps
devel-peer-request: caps: * bytes (glob)
@@ -666,3 +666,45 @@
url: ssh://user@dummy/debugrevlog
local: no
pushable: yes
+
+#endif
+
+#if no-rust zstd
+
+ $ hg --debug debugpeer ssh://user@dummy/debugrevlog
+ running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R debugrevlog serve --stdio['"] (re)
+ devel-peer-request: hello+between
+ devel-peer-request: pairs: 81 bytes
+ sending hello command
+ sending between command
+ remote: 468
+ remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlog-compression-zstd,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ remote: 1
+ devel-peer-request: protocaps
+ devel-peer-request: caps: * bytes (glob)
+ sending protocaps command
+ url: ssh://user@dummy/debugrevlog
+ local: no
+ pushable: yes
+
+#endif
+
+#if no-rust no-zstd
+
+ $ hg --debug debugpeer ssh://user@dummy/debugrevlog
+ running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R debugrevlog serve --stdio['"] (re)
+ devel-peer-request: hello+between
+ devel-peer-request: pairs: 81 bytes
+ sending hello command
+ sending between command
+ remote: 444
+ remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ remote: 1
+ devel-peer-request: protocaps
+ devel-peer-request: caps: * bytes (glob)
+ sending protocaps command
+ url: ssh://user@dummy/debugrevlog
+ local: no
+ pushable: yes
+
+#endif
--- a/tests/test-devel-warnings.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-devel-warnings.t Fri Feb 18 14:27:43 2022 +0100
@@ -282,12 +282,12 @@
#if no-chg no-pyoxidizer
$ hg blackbox -l 7
- 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated
- 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> devel-warn: foorbar is deprecated, go shopping
+ 1970-01-01 00:00:00.000 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated
+ 1970-01-01 00:00:00.000 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> devel-warn: foorbar is deprecated, go shopping
(compatibility will be dropped after Mercurial-42.1337, update your code.) at: $TESTTMP/buggylocking.py:* (oldanddeprecated) (glob)
- 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated exited 0 after * seconds (glob)
- 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated --traceback
- 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> devel-warn: foorbar is deprecated, go shopping
+ 1970-01-01 00:00:00.000 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated exited 0 after * seconds (glob)
+ 1970-01-01 00:00:00.000 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated --traceback
+ 1970-01-01 00:00:00.000 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> devel-warn: foorbar is deprecated, go shopping
(compatibility will be dropped after Mercurial-42.1337, update your code.) at:
*/hg:* in <module> (glob) (?)
*/mercurial/dispatch.py:* in run (glob)
@@ -303,17 +303,17 @@
*/mercurial/dispatch.py:* in <lambda> (glob)
*/mercurial/util.py:* in check (glob)
$TESTTMP/buggylocking.py:* in oldanddeprecated (glob)
- 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated --traceback exited 0 after * seconds (glob)
- 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> blackbox -l 7
+ 1970-01-01 00:00:00.000 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated --traceback exited 0 after * seconds (glob)
+ 1970-01-01 00:00:00.000 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> blackbox -l 7
#endif
#if chg no-pyoxidizer
$ hg blackbox -l 7
- 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated
- 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> devel-warn: foorbar is deprecated, go shopping
+ 1970-01-01 00:00:00.000 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated
+ 1970-01-01 00:00:00.000 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> devel-warn: foorbar is deprecated, go shopping
(compatibility will be dropped after Mercurial-42.1337, update your code.) at: $TESTTMP/buggylocking.py:* (oldanddeprecated) (glob)
- 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated exited 0 after * seconds (glob)
- 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated --traceback
- 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> devel-warn: foorbar is deprecated, go shopping
+ 1970-01-01 00:00:00.000 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated exited 0 after * seconds (glob)
+ 1970-01-01 00:00:00.000 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated --traceback
+ 1970-01-01 00:00:00.000 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> devel-warn: foorbar is deprecated, go shopping
(compatibility will be dropped after Mercurial-42.1337, update your code.) at:
*/hg:* in <module> (glob)
*/mercurial/dispatch.py:* in run (glob)
@@ -352,17 +352,17 @@
*/mercurial/dispatch.py:* in <lambda> (glob)
*/mercurial/util.py:* in check (glob)
$TESTTMP/buggylocking.py:* in oldanddeprecated (glob)
- 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated --traceback exited 0 after * seconds (glob)
- 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> blackbox -l 7
+ 1970-01-01 00:00:00.000 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated --traceback exited 0 after * seconds (glob)
+ 1970-01-01 00:00:00.000 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> blackbox -l 7
#endif
#if pyoxidizer
$ hg blackbox -l 7
- 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated
- 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> devel-warn: foorbar is deprecated, go shopping
+ 1970-01-01 00:00:00.000 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated
+ 1970-01-01 00:00:00.000 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> devel-warn: foorbar is deprecated, go shopping
(compatibility will be dropped after Mercurial-42.1337, update your code.) at: $TESTTMP/buggylocking.py:* (oldanddeprecated) (glob)
- 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated exited 0 after * seconds (glob)
- 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated --traceback
- 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> devel-warn: foorbar is deprecated, go shopping
+ 1970-01-01 00:00:00.000 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated exited 0 after * seconds (glob)
+ 1970-01-01 00:00:00.000 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated --traceback
+ 1970-01-01 00:00:00.000 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> devel-warn: foorbar is deprecated, go shopping
(compatibility will be dropped after Mercurial-42.1337, update your code.) at:
<string>:* in <module> (glob)
mercurial.dispatch:* in run (glob)
@@ -378,8 +378,8 @@
mercurial.dispatch:* in <lambda> (glob)
mercurial.util:* in check (glob)
$TESTTMP/buggylocking.py:* in oldanddeprecated (glob)
- 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated --traceback exited 0 after * seconds (glob)
- 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> blackbox -l 7
+ 1970-01-01 00:00:00.000 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated --traceback exited 0 after * seconds (glob)
+ 1970-01-01 00:00:00.000 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> blackbox -l 7
#endif
Test programming error failure:
--- a/tests/test-diff-change.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-diff-change.t Fri Feb 18 14:27:43 2022 +0100
@@ -284,7 +284,7 @@
7
-<<<<<<< local: fd1f17c90d7c - test: new file
z
- -||||||| base
+ -||||||| base: ae119d680c82 - test: lots of text
-8
-=======
-y
--- a/tests/test-diff-unified.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-diff-unified.t Fri Feb 18 14:27:43 2022 +0100
@@ -46,7 +46,7 @@
$ hg diff --nodates -U foo
abort: diff context lines count must be an integer, not 'foo'
- [255]
+ [10]
$ hg diff --nodates -U 2
@@ -87,7 +87,7 @@
$ hg --config diff.unified=foo diff --nodates
abort: diff context lines count must be an integer, not 'foo'
- [255]
+ [10]
noprefix config and option
--- a/tests/test-dirstate-race.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-dirstate-race.t Fri Feb 18 14:27:43 2022 +0100
@@ -18,7 +18,7 @@
Do we ever miss a sub-second change?:
$ for i in 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20; do
- > hg co -qC 0
+ > hg update -qC 0
> echo b > a
> hg st
> done
@@ -66,11 +66,11 @@
> )
> def extsetup(ui):
> extensions.wrapfunction(context.workingctx, '_checklookup', overridechecklookup)
- > def overridechecklookup(orig, self, files):
+ > def overridechecklookup(orig, self, *args, **kwargs):
> # make an update that changes the dirstate from underneath
> self._repo.ui.system(br"sh '$TESTTMP/dirstaterace.sh'",
> cwd=self._repo.root)
- > return orig(self, files)
+ > return orig(self, *args, **kwargs)
> EOF
$ hg debugrebuilddirstate
@@ -89,6 +89,7 @@
> rm b && rm -r dir1 && rm d && mkdir d && rm e && mkdir e
> EOF
+ $ sleep 1 # ensure non-ambiguous mtime
$ hg status --config extensions.dirstaterace=$TESTTMP/dirstaterace.py
M d
M e
@@ -147,6 +148,8 @@
>
> hg update -q -C 0
> hg cat -r 1 b > b
 + > # make sure the timestamp is not ambiguous and a write will be issued
+ > touch -t 198606251012 b
> EOF
"hg status" below should excludes "e", of which exec flag is set, for
--- a/tests/test-dirstate-race2.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-dirstate-race2.t Fri Feb 18 14:27:43 2022 +0100
@@ -19,22 +19,34 @@
$ hg commit -qAm _
$ echo aa > a
$ hg commit -m _
+# this sleep is there to ensure the current time is -at-least- one second away
+# from the file's mtime. It ensures the mtime is not ambiguous. If the test
+# "sleeps" longer, this will still be fine.
+# It is not used to synchronise parallel operations, so it is "fine" to use it.
+ $ sleep 1
+ $ hg status
$ hg debugdirstate --no-dates
n 644 3 (set |unset) a (re)
$ cat >> $TESTTMP/dirstaterace.py << EOF
+ > import time
> from mercurial import (
+ > commit,
> extensions,
> merge,
> )
> def extsetup(ui):
- > extensions.wrapfunction(merge, 'applyupdates', wrap)
- > def wrap(orig, *args, **kwargs):
- > res = orig(*args, **kwargs)
- > with open("a", "w"):
- > pass # just truncate the file
- > return res
+ > extensions.wrapfunction(merge, 'applyupdates', wrap(0))
+ > extensions.wrapfunction(commit, 'commitctx', wrap(1))
+ > def wrap(duration):
+ > def new(orig, *args, **kwargs):
+ > res = orig(*args, **kwargs)
+ > with open("a", "w"):
+ > pass # just truncate the file
+ > time.sleep(duration)
+ > return res
+ > return new
> EOF
Do an update where file 'a' is changed between hg writing it to disk
@@ -46,3 +58,32 @@
$ hg debugdirstate --no-dates
n 644 2 (set |unset) a (re)
$ echo a > a; hg status; hg diff
+
+Do a commit where file 'a' is changed between hg committing its new
+revision into the repository, and the writing of the dirstate.
+
+This used to result in a corrupted dirstate (size did not match committed size).
+
+ $ echo aaa > a; hg commit -qm _
+ $ hg merge -qr 1; hg resolve -m; rm a.orig
+ warning: conflicts while merging a! (edit, then use 'hg resolve --mark')
+ (no more unresolved files)
+ $ cat a
+ <<<<<<< working copy: be46f74ce38d - test: _
+ aaa
+ =======
+ aa
+ >>>>>>> merge rev: eb3fc6c17aa3 - test: _
+ $ hg debugdirstate --no-dates
+ m 0 -2 (set |unset) a (re)
+ $ hg commit -m _ --config extensions.race=$TESTTMP/dirstaterace.py
+ $ hg debugdirstate --no-dates
+ n 0 -1 unset a
+ $ cat a | wc -c
+ *0 (re)
+ $ hg cat -r . a | wc -c
+ *105 (re)
+ $ hg status; hg diff --stat
+ M a
+ a | 5 -----
+ 1 files changed, 0 insertions(+), 5 deletions(-)
--- a/tests/test-dispatch.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-dispatch.t Fri Feb 18 14:27:43 2022 +0100
@@ -84,7 +84,7 @@
> raise Exception('bad')
> EOF
$ hg log -b '--config=extensions.bad=bad.py' default
- *** failed to import extension bad from bad.py: bad
+ *** failed to import extension "bad" from bad.py: bad
abort: option --config may not be abbreviated
[10]
@@ -127,20 +127,20 @@
#if no-chg
$ HGPLAIN=+strictflags hg log -b --config='hooks.pre-log=false' default
abort: unknown revision '--config=hooks.pre-log=false'
- [255]
+ [10]
$ HGPLAIN=+strictflags hg log -b -R. default
abort: unknown revision '-R.'
- [255]
+ [10]
$ HGPLAIN=+strictflags hg log -b --cwd=. default
abort: unknown revision '--cwd=.'
- [255]
+ [10]
#endif
$ HGPLAIN=+strictflags hg log -b --debugger default
abort: unknown revision '--debugger'
- [255]
+ [10]
$ HGPLAIN=+strictflags hg log -b --config='alias.log=!echo pwned' default
abort: unknown revision '--config=alias.log=!echo pwned'
- [255]
+ [10]
$ HGPLAIN=+strictflags hg log --config='hooks.pre-log=false' -b default
abort: option --config may not be abbreviated
--- a/tests/test-double-merge.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-double-merge.t Fri Feb 18 14:27:43 2022 +0100
@@ -38,12 +38,12 @@
starting 4 threads for background file closing (?)
preserving foo for resolve of bar
preserving foo for resolve of foo
- bar: remote copied from foo -> m (premerge)
+ bar: remote copied from foo -> m
picked tool ':merge' for bar (binary False symlink False changedelete False)
merging foo and bar to bar
my bar@6a0df1dad128+ other bar@484bf6903104 ancestor foo@e6dc8efe11cc
premerge successful
- foo: versions differ -> m (premerge)
+ foo: versions differ -> m
picked tool ':merge' for foo (binary False symlink False changedelete False)
merging foo
my foo@6a0df1dad128+ other foo@484bf6903104 ancestor foo@e6dc8efe11cc
--- a/tests/test-empty.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-empty.t Fri Feb 18 14:27:43 2022 +0100
@@ -25,9 +25,10 @@
store
wcache
-Should be empty:
+Should be empty (except for the "basic" requires):
$ ls .hg/store
+ requires
Poke at a clone:
@@ -51,8 +52,9 @@
store
wcache
-Should be empty:
+Should be empty (except for the "basic" requires):
$ ls .hg/store
+ requires
$ cd ..
--- a/tests/test-extension.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-extension.t Fri Feb 18 14:27:43 2022 +0100
@@ -649,7 +649,7 @@
module stub. Our custom lazy importer for Python 2 always returns a stub.
$ (PYTHONPATH=${PYTHONPATH}${PATHSEP}${TESTTMP}; hg --config extensions.checkrelativity=$TESTTMP/checkrelativity.py checkrelativity) || true
- *** failed to import extension checkrelativity from $TESTTMP/checkrelativity.py: No module named 'extlibroot.lsub1.lsub2.notexist' (py3 !)
+ *** failed to import extension "checkrelativity" from $TESTTMP/checkrelativity.py: No module named 'extlibroot.lsub1.lsub2.notexist' (py3 !)
hg: unknown command 'checkrelativity' (py3 !)
(use 'hg help' for a list of commands) (py3 !)
@@ -1882,7 +1882,7 @@
> EOF
$ hg deprecatedcmd > /dev/null
- *** failed to import extension deprecatedcmd from $TESTTMP/deprecated/deprecatedcmd.py: missing attributes: norepo, optionalrepo, inferrepo
+ *** failed to import extension "deprecatedcmd" from $TESTTMP/deprecated/deprecatedcmd.py: missing attributes: norepo, optionalrepo, inferrepo
*** (use @command decorator to register 'deprecatedcmd')
hg: unknown command 'deprecatedcmd'
(use 'hg help' for a list of commands)
@@ -1891,7 +1891,7 @@
the extension shouldn't be loaded at all so the mq works:
$ hg qseries --config extensions.mq= > /dev/null
- *** failed to import extension deprecatedcmd from $TESTTMP/deprecated/deprecatedcmd.py: missing attributes: norepo, optionalrepo, inferrepo
+ *** failed to import extension "deprecatedcmd" from $TESTTMP/deprecated/deprecatedcmd.py: missing attributes: norepo, optionalrepo, inferrepo
*** (use @command decorator to register 'deprecatedcmd')
$ cd ..
@@ -1939,8 +1939,117 @@
> test_unicode_default_value = $TESTTMP/test_unicode_default_value.py
> EOF
$ hg -R $TESTTMP/opt-unicode-default dummy
- *** failed to import extension test_unicode_default_value from $TESTTMP/test_unicode_default_value.py: unicode *'value' found in cmdtable.dummy (glob)
+ *** failed to import extension "test_unicode_default_value" from $TESTTMP/test_unicode_default_value.py: unicode 'value' found in cmdtable.dummy (py3 !)
+ *** failed to import extension "test_unicode_default_value" from $TESTTMP/test_unicode_default_value.py: unicode u'value' found in cmdtable.dummy (no-py3 !)
*** (use b'' to make it byte string)
hg: unknown command 'dummy'
(did you mean summary?)
[10]
+
+Check the mandatory extension feature
+-------------------------------------
+
+ $ hg init mandatory-extensions
+ $ cat > $TESTTMP/mandatory-extensions/.hg/good.py << EOF
+ > pass
+ > EOF
+ $ cat > $TESTTMP/mandatory-extensions/.hg/bad.py << EOF
+ > raise RuntimeError("babar")
+ > EOF
+ $ cat > $TESTTMP/mandatory-extensions/.hg/syntax.py << EOF
+ > def (
+ > EOF
+
+Check that the good one load :
+
+ $ cat > $TESTTMP/mandatory-extensions/.hg/hgrc << EOF
+ > [extensions]
+ > good = $TESTTMP/mandatory-extensions/.hg/good.py
+ > EOF
+
+ $ hg -R mandatory-extensions id
+ 000000000000 tip
+
+Make it mandatory to load
+
+ $ cat >> $TESTTMP/mandatory-extensions/.hg/hgrc << EOF
+ > good:required = yes
+ > EOF
+
+ $ hg -R mandatory-extensions id
+ 000000000000 tip
+
+Check that the bad one does not load
+
+ $ cat >> $TESTTMP/mandatory-extensions/.hg/hgrc << EOF
+ > bad = $TESTTMP/mandatory-extensions/.hg/bad.py
+ > EOF
+
+ $ hg -R mandatory-extensions id
+ *** failed to import extension "bad" from $TESTTMP/mandatory-extensions/.hg/bad.py: babar
+ 000000000000 tip
+
+Make it mandatory to load
+
+ $ cat >> $TESTTMP/mandatory-extensions/.hg/hgrc << EOF
+ > bad:required = yes
+ > EOF
+
+ $ hg -R mandatory-extensions id
+ abort: failed to import extension "bad" from $TESTTMP/mandatory-extensions/.hg/bad.py: babar
+ (loading of this extension was required, see `hg help config.extensions` for details)
+ [255]
+
+Make it not mandatory to load
+
+ $ cat >> $TESTTMP/mandatory-extensions/.hg/hgrc << EOF
+ > bad:required = no
+ > EOF
+
+ $ hg -R mandatory-extensions id
+ *** failed to import extension "bad" from $TESTTMP/mandatory-extensions/.hg/bad.py: babar
+ 000000000000 tip
+
+Same check with the syntax error one
+
+ $ cat >> $TESTTMP/mandatory-extensions/.hg/hgrc << EOF
+ > bad = !
+ > syntax = $TESTTMP/mandatory-extensions/.hg/syntax.py
+ > syntax:required = yes
+ > EOF
+
+ $ hg -R mandatory-extensions id
+ abort: failed to import extension "syntax" from $TESTTMP/mandatory-extensions/.hg/syntax.py: invalid syntax (*syntax.py, line 1) (glob)
+ (loading of this extension was required, see `hg help config.extensions` for details)
+ [255]
+
+Same check with a missing one
+
+ $ cat >> $TESTTMP/mandatory-extensions/.hg/hgrc << EOF
+ > syntax = !
+ > syntax:required =
+ > missing = foo/bar/baz/I/do/not/exist/
+ > missing:required = yes
+ > EOF
+
+ $ hg -R mandatory-extensions id
+ abort: failed to import extension "missing" from foo/bar/baz/I/do/not/exist/: [Errno 2] $ENOENT$: 'foo/bar/baz/I/do/not/exist'
+ (loading of this extension was required, see `hg help config.extensions` for details)
+ [255]
+
+Have a "default" setting for the suboption:
+
+ $ cat > $TESTTMP/mandatory-extensions/.hg/hgrc << EOF
+ > [extensions]
+ > bad = $TESTTMP/mandatory-extensions/.hg/bad.py
+ > bad:required = no
+ > good = $TESTTMP/mandatory-extensions/.hg/good.py
+ > syntax = $TESTTMP/mandatory-extensions/.hg/syntax.py
+ > *:required = yes
+ > EOF
+
+ $ hg -R mandatory-extensions id
+ *** failed to import extension "bad" from $TESTTMP/mandatory-extensions/.hg/bad.py: babar
+ abort: failed to import extension "syntax" from $TESTTMP/mandatory-extensions/.hg/syntax.py: invalid syntax (*syntax.py, line 1) (glob)
+ (loading of this extension was required, see `hg help config.extensions` for details)
+ [255]
--- a/tests/test-fastannotate-hg.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-fastannotate-hg.t Fri Feb 18 14:27:43 2022 +0100
@@ -165,16 +165,16 @@
a
a
a
- <<<<<<< working copy: 5fbdc1152d97 - test: b2.1
+ <<<<<<< working copy: 5fbdc1152d97 - test: b2.1
b4
c
b5
- ||||||| base
+ ||||||| common ancestor: 3086dbafde1c - test: b
=======
b4
b5
b6
- >>>>>>> merge rev: 37ec9f5c3d1f - test: b2
+ >>>>>>> merge rev: 37ec9f5c3d1f - test: b2
$ cat <<EOF > b
> a
> a
@@ -789,16 +789,16 @@
warning: conflicts while merging a! (edit, then use 'hg resolve --mark')
[1]
$ cat a
- <<<<<<< working copy: 0a068f0261cf - test: 3
+ <<<<<<< working copy: 0a068f0261cf - test: 3
1
2
3
- ||||||| base
+ ||||||| common ancestor: 1ed24be7e7a0 - test: 2
1
2
=======
a
- >>>>>>> merge rev: 9409851bc20a - test: a
+ >>>>>>> merge rev: 9409851bc20a - test: a
$ cat > a << EOF
> b
> 1
--- a/tests/test-fix.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-fix.t Fri Feb 18 14:27:43 2022 +0100
@@ -1169,8 +1169,9 @@
$ hg commit -Aqm "foo"
$ hg ci --amend -m rewritten
$ hg --hidden fix -r 0
- abort: fixing obsolete revision could cause divergence
- [255]
+ abort: cannot fix b87e30dbf19b, as that creates content-divergence with 2e007a78dfb8
+ (add --verbose for details or see 'hg help evolution.instability')
+ [10]
$ hg --hidden fix -r 0 --config experimental.evolution.allowdivergence=true
2 new content-divergent changesets
--- a/tests/test-fncache.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-fncache.t Fri Feb 18 14:27:43 2022 +0100
@@ -153,6 +153,7 @@
.hg/store/data/tst.d.hg
.hg/store/data/tst.d.hg/_foo.i
.hg/store/phaseroots
+ .hg/store/requires
.hg/store/undo
.hg/store/undo.backupfiles
.hg/store/undo.phaseroots
--- a/tests/test-graft.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-graft.t Fri Feb 18 14:27:43 2022 +0100
@@ -212,7 +212,7 @@
ancestor: 68795b066622, local: ef0ef43d49e7+, remote: 5d205f8b35b6
starting 4 threads for background file closing (?)
preserving b for resolve of b
- b: local copied/moved from a -> m (premerge)
+ b: local copied/moved from a -> m
picked tool ':merge' for b (binary False symlink False changedelete False)
merging b and a to b
my b@ef0ef43d49e7+ other a@5d205f8b35b6 ancestor a@68795b066622
@@ -242,13 +242,10 @@
d: remote is newer -> g
getting d
preserving e for resolve of e
- e: versions differ -> m (premerge)
+ e: versions differ -> m
picked tool ':merge' for e (binary False symlink False changedelete False)
merging e
my e@1905859650ec+ other e@9c233e8e184d ancestor e@4c60f11aa304
- e: versions differ -> m (merge)
- picked tool ':merge' for e (binary False symlink False changedelete False)
- my e@1905859650ec+ other e@9c233e8e184d ancestor e@4c60f11aa304
warning: conflicts while merging e! (edit, then use 'hg resolve --mark')
abort: unresolved conflicts, can't continue
(use 'hg resolve' and 'hg graft --continue')
@@ -855,8 +852,8 @@
$ hg graft -r 6 --base 5
grafting 6:25a2b029d3ae "6"
merging d
+ warning: conflicts while merging d! (edit, then use 'hg resolve --mark')
merging e
- warning: conflicts while merging d! (edit, then use 'hg resolve --mark')
abort: unresolved conflicts, can't continue
(use 'hg resolve' and 'hg graft --continue')
[1]
--- a/tests/test-grep.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-grep.t Fri Feb 18 14:27:43 2022 +0100
@@ -1200,11 +1200,11 @@
$ hg log -f add0-cp4
abort: cannot follow nonexistent file: "add0-cp4"
- [255]
+ [20]
$ hg grep --diff -f data add0-cp4
abort: cannot follow nonexistent file: "add0-cp4"
- [255]
+ [20]
BROKEN: maybe better to abort
$ hg grep -f data add0-cp4
@@ -1214,11 +1214,11 @@
$ hg log -f add0-cp1-mod1-rm3
abort: cannot follow file not in parent revision: "add0-cp1-mod1-rm3"
- [255]
+ [20]
$ hg grep --diff -f data add0-cp1-mod1-rm3
abort: cannot follow file not in parent revision: "add0-cp1-mod1-rm3"
- [255]
+ [20]
BROKEN: maybe better to abort
$ hg grep -f data add0-cp1-mod1-rm3
@@ -1229,11 +1229,11 @@
$ hg log -fr. add0-cp1-mod1-rm3
abort: cannot follow file not in any of the specified revisions: "add0-cp1-mod1-rm3"
- [255]
+ [20]
$ hg grep --diff -fr. data add0-cp1-mod1-rm3
abort: cannot follow file not in any of the specified revisions: "add0-cp1-mod1-rm3"
- [255]
+ [20]
BROKEN: should abort
$ hg grep -fr. data add0-cp1-mod1-rm3
@@ -1244,11 +1244,11 @@
$ hg log -f add0-rm4
abort: cannot follow file not in parent revision: "add0-rm4"
- [255]
+ [20]
$ hg grep --diff -f data add0-rm4
abort: cannot follow file not in parent revision: "add0-rm4"
- [255]
+ [20]
BROKEN: should abort
$ hg grep -f data add0-rm4
@@ -1340,11 +1340,11 @@
$ hg log -fr2 add0-rm2
abort: cannot follow file not in any of the specified revisions: "add0-rm2"
- [255]
+ [20]
$ hg grep --diff -fr2 data add0-rm2
abort: cannot follow file not in any of the specified revisions: "add0-rm2"
- [255]
+ [20]
BROKEN: should abort
$ hg grep -fr2 data add0-rm2
--- a/tests/test-hardlinks.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-hardlinks.t Fri Feb 18 14:27:43 2022 +0100
@@ -52,6 +52,7 @@
1 r1/.hg/store/data/f1.i
1 r1/.hg/store/fncache (repofncache !)
1 r1/.hg/store/phaseroots
+ 1 r1/.hg/store/requires
1 r1/.hg/store/undo
1 r1/.hg/store/undo.backup.fncache (repofncache !)
1 r1/.hg/store/undo.backupfiles
@@ -93,6 +94,7 @@
2 r1/.hg/store/data/f1.i
1 r1/.hg/store/fncache (repofncache !)
1 r1/.hg/store/phaseroots
+ 1 r1/.hg/store/requires
1 r1/.hg/store/undo
1 r1/.hg/store/undo.backup.fncache (repofncache !)
1 r1/.hg/store/undo.backupfiles
@@ -104,6 +106,7 @@
2 r2/.hg/store/data/d1/f2.i
2 r2/.hg/store/data/f1.i
1 r2/.hg/store/fncache (repofncache !)
+ 1 r2/.hg/store/requires
Repo r3 should not be hardlinked:
@@ -114,6 +117,7 @@
1 r3/.hg/store/data/f1.i
1 r3/.hg/store/fncache (repofncache !)
1 r3/.hg/store/phaseroots
+ 1 r3/.hg/store/requires
1 r3/.hg/store/undo
1 r3/.hg/store/undo.backupfiles
1 r3/.hg/store/undo.phaseroots
@@ -140,6 +144,7 @@
1 r3/.hg/store/data/f1.i
1 r3/.hg/store/fncache (repofncache !)
1 r3/.hg/store/phaseroots
+ 1 r3/.hg/store/requires
1 r3/.hg/store/undo
1 r3/.hg/store/undo.backup.fncache (repofncache !)
1 r3/.hg/store/undo.backup.phaseroots
@@ -172,6 +177,7 @@
1 r2/.hg/store/data/d1/f2.i
2 r2/.hg/store/data/f1.i
[12] r2/\.hg/store/fncache (re) (repofncache !)
+ 1 r2/.hg/store/requires
#if hardlink-whitelisted repofncache
$ nlinksdir r2/.hg/store/fncache
@@ -202,6 +208,7 @@
1 r2/.hg/store/data/d1/f2.i
1 r2/.hg/store/data/f1.i
1 r2/.hg/store/fncache (repofncache !)
+ 1 r2/.hg/store/requires
#if hardlink-whitelisted repofncache
$ nlinksdir r2/.hg/store/fncache
@@ -261,6 +268,7 @@
2 r4/.hg/store/data/f3.i
2 r4/.hg/store/fncache (repofncache !)
2 r4/.hg/store/phaseroots
+ 2 r4/.hg/store/requires
2 r4/.hg/store/undo
2 r4/.hg/store/undo.backup.fncache (repofncache !)
2 r4/.hg/store/undo.backup.phaseroots
@@ -318,6 +326,7 @@
2 r4/.hg/store/data/f3.i
2 r4/.hg/store/fncache
2 r4/.hg/store/phaseroots
+ 2 r4/.hg/store/requires
2 r4/.hg/store/undo
2 r4/.hg/store/undo.backup.fncache (repofncache !)
2 r4/.hg/store/undo.backup.phaseroots
--- a/tests/test-help.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-help.t Fri Feb 18 14:27:43 2022 +0100
@@ -1519,26 +1519,38 @@
"commands.update.check"
Determines what level of checking 'hg update' will perform before
moving to a destination revision. Valid values are "abort", "none",
- "linear", and "noconflict". "abort" always fails if the working
- directory has uncommitted changes. "none" performs no checking, and
- may result in a merge with uncommitted changes. "linear" allows any
- update as long as it follows a straight line in the revision history,
- and may trigger a merge with uncommitted changes. "noconflict" will
- allow any update which would not trigger a merge with uncommitted
- changes, if any are present. (default: "linear")
+ "linear", and "noconflict".
+
+ - "abort" always fails if the working directory has uncommitted
+ changes.
+ - "none" performs no checking, and may result in a merge with
+ uncommitted changes.
+ - "linear" allows any update as long as it follows a straight line in
+ the revision history, and may trigger a merge with uncommitted
+ changes.
+ - "noconflict" will allow any update which would not trigger a merge
+ with uncommitted changes, if any are present.
+
+ (default: "linear")
$ hg help config.commands.update.check
"commands.update.check"
Determines what level of checking 'hg update' will perform before
moving to a destination revision. Valid values are "abort", "none",
- "linear", and "noconflict". "abort" always fails if the working
- directory has uncommitted changes. "none" performs no checking, and
- may result in a merge with uncommitted changes. "linear" allows any
- update as long as it follows a straight line in the revision history,
- and may trigger a merge with uncommitted changes. "noconflict" will
- allow any update which would not trigger a merge with uncommitted
- changes, if any are present. (default: "linear")
+ "linear", and "noconflict".
+
+ - "abort" always fails if the working directory has uncommitted
+ changes.
+ - "none" performs no checking, and may result in a merge with
+ uncommitted changes.
+ - "linear" allows any update as long as it follows a straight line in
+ the revision history, and may trigger a merge with uncommitted
+ changes.
+ - "noconflict" will allow any update which would not trigger a merge
+ with uncommitted changes, if any are present.
+
+ (default: "linear")
$ hg help config.ommands.update.check
@@ -1587,6 +1599,8 @@
"use-dirstate-v2"
+ "use-dirstate-tracked-hint"
+
"use-persistent-nodemap"
"use-share-safe"
--- a/tests/test-hgignore.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-hgignore.t Fri Feb 18 14:27:43 2022 +0100
@@ -59,9 +59,19 @@
? syntax
$ echo "*.o" > .hgignore
+#if no-rhg
$ hg status
abort: $TESTTMP/ignorerepo/.hgignore: invalid pattern (relre): *.o (glob)
[255]
+#endif
+#if rhg
+ $ hg status
+ Unsupported syntax regex parse error:
+ ^(?:*.o)
+ ^
+ error: repetition operator missing expression
+ [255]
+#endif
Ensure given files are relative to cwd
--- a/tests/test-hgweb-commands.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-hgweb-commands.t Fri Feb 18 14:27:43 2022 +0100
@@ -2193,8 +2193,7 @@
lookup
pushkey
stream-preferred
- streamreqs=generaldelta,revlogv1,sparserevlog (no-rust !)
- streamreqs=generaldelta,persistent-nodemap,revlogv1,sparserevlog (rust !)
+ streamreqs=*,* (glob)
unbundle=HG10GZ,HG10BZ,HG10UN
unbundlehash
--- a/tests/test-histedit-non-commute-abort.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-histedit-non-commute-abort.t Fri Feb 18 14:27:43 2022 +0100
@@ -77,8 +77,8 @@
insert unsupported advisory merge record
$ hg --config extensions.fakemergerecord=$TESTDIR/fakemergerecord.py fakemergerecord -x
$ hg debugmergestate
- local (local): 8f7551c7e4a2f2efe0bc8c741baf7f227d65d758
- other (histedit): e860deea161a2f77de56603b340ebbb4536308ae
+ local (already edited): 8f7551c7e4a2f2efe0bc8c741baf7f227d65d758
+ other (current change): e860deea161a2f77de56603b340ebbb4536308ae
file: e (state "u")
local path: e (hash 58e6b3a414a1e090dfc6029add0f3555ccba127f, flags "")
ancestor path: e (node 0000000000000000000000000000000000000000)
--- a/tests/test-hook.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-hook.t Fri Feb 18 14:27:43 2022 +0100
@@ -464,6 +464,7 @@
fncache (repofncache !)
journal.phaseroots
phaseroots
+ requires
undo
undo.backup.fncache (repofncache !)
undo.backupfiles
--- a/tests/test-http-api-httpv2.t Fri Feb 18 12:55:39 2022 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,743 +0,0 @@
-#require no-chg
-
- $ . $TESTDIR/wireprotohelpers.sh
- $ enabledummycommands
-
- $ hg init server
- $ cat > server/.hg/hgrc << EOF
- > [experimental]
- > web.apiserver = true
- > EOF
- $ hg -R server serve -p $HGPORT -d --pid-file hg.pid
- $ cat hg.pid > $DAEMON_PIDS
-
-HTTP v2 protocol not enabled by default
-
- $ sendhttpraw << EOF
- > httprequest GET api/$HTTPV2
- > user-agent: test
- > EOF
- using raw connection to peer
- s> setsockopt(6, 1, 1) -> None (?)
- s> GET /api/exp-http-v2-0003 HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> user-agent: test\r\n
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> \r\n
- s> makefile('rb', None)
- s> HTTP/1.1 404 Not Found\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Content-Type: text/plain\r\n
- s> Content-Length: 33\r\n
- s> \r\n
- s> API exp-http-v2-0003 not enabled\n
-
-Restart server with support for HTTP v2 API
-
- $ killdaemons.py
- $ enablehttpv2 server
- $ hg -R server serve -p $HGPORT -d --pid-file hg.pid
- $ cat hg.pid > $DAEMON_PIDS
-
-Request to unknown command yields 404
-
- $ sendhttpraw << EOF
- > httprequest POST api/$HTTPV2/ro/badcommand
- > user-agent: test
- > EOF
- using raw connection to peer
- s> setsockopt(6, 1, 1) -> None (?)
- s> POST /api/exp-http-v2-0003/ro/badcommand HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> user-agent: test\r\n
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> \r\n
- s> makefile('rb', None)
- s> HTTP/1.1 404 Not Found\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Content-Type: text/plain\r\n
- s> Content-Length: 42\r\n
- s> \r\n
- s> unknown wire protocol command: badcommand\n
-
-GET to read-only command yields a 405
-
- $ sendhttpraw << EOF
- > httprequest GET api/$HTTPV2/ro/customreadonly
- > user-agent: test
- > EOF
- using raw connection to peer
- s> setsockopt(6, 1, 1) -> None (?)
- s> GET /api/exp-http-v2-0003/ro/customreadonly HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> user-agent: test\r\n
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> \r\n
- s> makefile('rb', None)
- s> HTTP/1.1 405 Method Not Allowed\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Allow: POST\r\n
- s> Content-Length: 30\r\n
- s> \r\n
- s> commands require POST requests
-
-Missing Accept header results in 406
-
- $ sendhttpraw << EOF
- > httprequest POST api/$HTTPV2/ro/customreadonly
- > user-agent: test
- > EOF
- using raw connection to peer
- s> setsockopt(6, 1, 1) -> None (?)
- s> POST /api/exp-http-v2-0003/ro/customreadonly HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> user-agent: test\r\n
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> \r\n
- s> makefile('rb', None)
- s> HTTP/1.1 406 Not Acceptable\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Content-Type: text/plain\r\n
- s> Content-Length: 85\r\n
- s> \r\n
- s> client MUST specify Accept header with value: application/mercurial-exp-framing-0006\n
-
-Bad Accept header results in 406
-
- $ sendhttpraw << EOF
- > httprequest POST api/$HTTPV2/ro/customreadonly
- > accept: invalid
- > user-agent: test
- > EOF
- using raw connection to peer
- s> setsockopt(6, 1, 1) -> None (?)
- s> POST /api/exp-http-v2-0003/ro/customreadonly HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> accept: invalid\r\n
- s> user-agent: test\r\n
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> \r\n
- s> makefile('rb', None)
- s> HTTP/1.1 406 Not Acceptable\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Content-Type: text/plain\r\n
- s> Content-Length: 85\r\n
- s> \r\n
- s> client MUST specify Accept header with value: application/mercurial-exp-framing-0006\n
-
-Bad Content-Type header results in 415
-
- $ sendhttpraw << EOF
- > httprequest POST api/$HTTPV2/ro/customreadonly
- > accept: $MEDIATYPE
- > user-agent: test
- > content-type: badmedia
- > EOF
- using raw connection to peer
- s> setsockopt(6, 1, 1) -> None (?)
- s> POST /api/exp-http-v2-0003/ro/customreadonly HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> accept: application/mercurial-exp-framing-0006\r\n
- s> content-type: badmedia\r\n
- s> user-agent: test\r\n
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> \r\n
- s> makefile('rb', None)
- s> HTTP/1.1 415 Unsupported Media Type\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Content-Type: text/plain\r\n
- s> Content-Length: 88\r\n
- s> \r\n
- s> client MUST send Content-Type header with value: application/mercurial-exp-framing-0006\n
-
-Request to read-only command works out of the box
-
- $ sendhttpraw << EOF
- > httprequest POST api/$HTTPV2/ro/customreadonly
- > accept: $MEDIATYPE
- > content-type: $MEDIATYPE
- > user-agent: test
- > frame 1 1 stream-begin command-request new cbor:{b'name': b'customreadonly'}
- > EOF
- using raw connection to peer
- s> setsockopt(6, 1, 1) -> None (?)
- s> POST /api/exp-http-v2-0003/ro/customreadonly HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> *\r\n (glob)
- s> content-type: application/mercurial-exp-framing-0006\r\n
- s> user-agent: test\r\n
- s> content-length: 29\r\n
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> \r\n
- s> \x15\x00\x00\x01\x00\x01\x01\x11\xa1DnameNcustomreadonly
- s> makefile('rb', None)
- s> HTTP/1.1 200 OK\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Content-Type: application/mercurial-exp-framing-0006\r\n
- s> Transfer-Encoding: chunked\r\n
- s> \r\n
- s> 11\r\n
- s> \t\x00\x00\x01\x00\x02\x01\x92Hidentity
- s> \r\n
- s> 13\r\n
- s> \x0b\x00\x00\x01\x00\x02\x041\xa1FstatusBok
- s> \r\n
- s> 27\r\n
- s> \x1f\x00\x00\x01\x00\x02\x041X\x1dcustomreadonly bytes response
- s> \r\n
- s> 8\r\n
- s> \x00\x00\x00\x01\x00\x02\x002
- s> \r\n
- s> 0\r\n
- s> \r\n
-
- $ sendhttpv2peerverbose << EOF
- > command customreadonly
- > EOF
- creating http peer for wire protocol version 2
- sending customreadonly command
- s> setsockopt(6, 1, 1) -> None (?)
- s> POST /api/exp-http-v2-0003/ro/customreadonly HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> accept: application/mercurial-exp-framing-0006\r\n
- s> content-type: application/mercurial-exp-framing-0006\r\n
- s> content-length: 65\r\n
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> user-agent: Mercurial debugwireproto\r\n
- s> \r\n
- s> \x1c\x00\x00\x01\x00\x01\x01\x82\xa1Pcontentencodings\x81Hidentity\x15\x00\x00\x01\x00\x01\x00\x11\xa1DnameNcustomreadonly
- s> makefile('rb', None)
- s> HTTP/1.1 200 OK\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Content-Type: application/mercurial-exp-framing-0006\r\n
- s> Transfer-Encoding: chunked\r\n
- s> \r\n
- s> 11\r\n
- s> \t\x00\x00\x01\x00\x02\x01\x92
- s> Hidentity
- s> \r\n
- s> 13\r\n
- s> \x0b\x00\x00\x01\x00\x02\x041
- s> \xa1FstatusBok
- s> \r\n
- s> 27\r\n
- s> \x1f\x00\x00\x01\x00\x02\x041
- s> X\x1dcustomreadonly bytes response
- s> \r\n
- s> 8\r\n
- s> \x00\x00\x00\x01\x00\x02\x002
- s> \r\n
- s> 0\r\n
- s> \r\n
- response: gen[
- b'customreadonly bytes response'
- ]
- (sent 2 HTTP requests and * bytes; received * bytes in responses) (glob)
-
-Request to read-write command fails because server is read-only by default
-
-GET to read-write request yields 405
-
- $ sendhttpraw << EOF
- > httprequest GET api/$HTTPV2/rw/customreadonly
- > user-agent: test
- > EOF
- using raw connection to peer
- s> setsockopt(6, 1, 1) -> None (?)
- s> GET /api/exp-http-v2-0003/rw/customreadonly HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> user-agent: test\r\n
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> \r\n
- s> makefile('rb', None)
- s> HTTP/1.1 405 Method Not Allowed\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Allow: POST\r\n
- s> Content-Length: 30\r\n
- s> \r\n
- s> commands require POST requests
-
-Even for unknown commands
-
- $ sendhttpraw << EOF
- > httprequest GET api/$HTTPV2/rw/badcommand
- > user-agent: test
- > EOF
- using raw connection to peer
- s> setsockopt(6, 1, 1) -> None (?)
- s> GET /api/exp-http-v2-0003/rw/badcommand HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> user-agent: test\r\n
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> \r\n
- s> makefile('rb', None)
- s> HTTP/1.1 405 Method Not Allowed\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Allow: POST\r\n
- s> Content-Length: 30\r\n
- s> \r\n
- s> commands require POST requests
-
-SSL required by default
-
- $ sendhttpraw << EOF
- > httprequest POST api/$HTTPV2/rw/customreadonly
- > user-agent: test
- > EOF
- using raw connection to peer
- s> setsockopt(6, 1, 1) -> None (?)
- s> POST /api/exp-http-v2-0003/rw/customreadonly HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> user-agent: test\r\n
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> \r\n
- s> makefile('rb', None)
- s> HTTP/1.1 403 ssl required\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Content-Length: 17\r\n
- s> \r\n
- s> permission denied
-
-Restart server to allow non-ssl read-write operations
-
- $ killdaemons.py
- $ cat > server/.hg/hgrc << EOF
- > [experimental]
- > web.apiserver = true
- > web.api.http-v2 = true
- > [web]
- > push_ssl = false
- > allow-push = *
- > EOF
-
- $ hg -R server serve -p $HGPORT -d --pid-file hg.pid -E error.log
- $ cat hg.pid > $DAEMON_PIDS
-
-Authorized request for valid read-write command works
-
- $ sendhttpraw << EOF
- > httprequest POST api/$HTTPV2/rw/customreadonly
- > user-agent: test
- > accept: $MEDIATYPE
- > content-type: $MEDIATYPE
- > frame 1 1 stream-begin command-request new cbor:{b'name': b'customreadonly'}
- > EOF
- using raw connection to peer
- s> setsockopt(6, 1, 1) -> None (?)
- s> POST /api/exp-http-v2-0003/rw/customreadonly HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> accept: application/mercurial-exp-framing-0006\r\n
- s> content-type: application/mercurial-exp-framing-0006\r\n
- s> user-agent: test\r\n
- s> content-length: 29\r\n
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> \r\n
- s> \x15\x00\x00\x01\x00\x01\x01\x11\xa1DnameNcustomreadonly
- s> makefile('rb', None)
- s> HTTP/1.1 200 OK\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Content-Type: application/mercurial-exp-framing-0006\r\n
- s> Transfer-Encoding: chunked\r\n
- s> \r\n
- s> 11\r\n
- s> \t\x00\x00\x01\x00\x02\x01\x92Hidentity
- s> \r\n
- s> 13\r\n
- s> \x0b\x00\x00\x01\x00\x02\x041\xa1FstatusBok
- s> \r\n
- s> 27\r\n
- s> \x1f\x00\x00\x01\x00\x02\x041X\x1dcustomreadonly bytes response
- s> \r\n
- s> 8\r\n
- s> \x00\x00\x00\x01\x00\x02\x002
- s> \r\n
- s> 0\r\n
- s> \r\n
-
-Authorized request for unknown command is rejected
-
- $ sendhttpraw << EOF
- > httprequest POST api/$HTTPV2/rw/badcommand
- > user-agent: test
- > accept: $MEDIATYPE
- > EOF
- using raw connection to peer
- s> setsockopt(6, 1, 1) -> None (?)
- s> POST /api/exp-http-v2-0003/rw/badcommand HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> accept: application/mercurial-exp-framing-0006\r\n
- s> user-agent: test\r\n
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> \r\n
- s> makefile('rb', None)
- s> HTTP/1.1 404 Not Found\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Content-Type: text/plain\r\n
- s> Content-Length: 42\r\n
- s> \r\n
- s> unknown wire protocol command: badcommand\n
-
-debugreflect isn't enabled by default
-
- $ sendhttpraw << EOF
- > httprequest POST api/$HTTPV2/ro/debugreflect
- > user-agent: test
- > EOF
- using raw connection to peer
- s> setsockopt(6, 1, 1) -> None (?)
- s> POST /api/exp-http-v2-0003/ro/debugreflect HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> user-agent: test\r\n
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> \r\n
- s> makefile('rb', None)
- s> HTTP/1.1 404 Not Found\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Content-Type: text/plain\r\n
- s> Content-Length: 34\r\n
- s> \r\n
- s> debugreflect service not available
-
-Restart server to get debugreflect endpoint
-
- $ killdaemons.py
- $ cat > server/.hg/hgrc << EOF
- > [experimental]
- > web.apiserver = true
- > web.api.debugreflect = true
- > web.api.http-v2 = true
- > [web]
- > push_ssl = false
- > allow-push = *
- > EOF
-
- $ hg -R server serve -p $HGPORT -d --pid-file hg.pid -E error.log
- $ cat hg.pid > $DAEMON_PIDS
-
-Command frames can be reflected via debugreflect
-
- $ sendhttpraw << EOF
- > httprequest POST api/$HTTPV2/ro/debugreflect
- > accept: $MEDIATYPE
- > content-type: $MEDIATYPE
- > user-agent: test
- > frame 1 1 stream-begin command-request new cbor:{b'name': b'command1', b'args': {b'foo': b'val1', b'bar1': b'val'}}
- > EOF
- using raw connection to peer
- s> setsockopt(6, 1, 1) -> None (?)
- s> POST /api/exp-http-v2-0003/ro/debugreflect HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> accept: application/mercurial-exp-framing-0006\r\n
- s> content-type: application/mercurial-exp-framing-0006\r\n
- s> user-agent: test\r\n
- s> content-length: 47\r\n
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> \r\n
- s> \'\x00\x00\x01\x00\x01\x01\x11\xa2Dargs\xa2Dbar1CvalCfooDval1DnameHcommand1
- s> makefile('rb', None)
- s> HTTP/1.1 200 OK\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Content-Type: text/plain\r\n
- s> Content-Length: 223\r\n
- s> \r\n
- s> received: 1 1 1 \xa2Dargs\xa2Dbar1CvalCfooDval1DnameHcommand1\n
- s> ["runcommand", {"args": {"bar1": "val", "foo": "val1"}, "command": "command1", "data": null, "redirect": null, "requestid": 1}]\n
- s> received: <no frame>\n
- s> {"action": "noop"}
-
-Multiple requests to regular command URL are not allowed
-
- $ sendhttpraw << EOF
- > httprequest POST api/$HTTPV2/ro/customreadonly
- > accept: $MEDIATYPE
- > content-type: $MEDIATYPE
- > user-agent: test
- > frame 1 1 stream-begin command-request new cbor:{b'name': b'customreadonly'}
- > EOF
- using raw connection to peer
- s> setsockopt(6, 1, 1) -> None (?)
- s> POST /api/exp-http-v2-0003/ro/customreadonly HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> accept: application/mercurial-exp-framing-0006\r\n
- s> content-type: application/mercurial-exp-framing-0006\r\n
- s> user-agent: test\r\n
- s> content-length: 29\r\n
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> \r\n
- s> \x15\x00\x00\x01\x00\x01\x01\x11\xa1DnameNcustomreadonly
- s> makefile('rb', None)
- s> HTTP/1.1 200 OK\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Content-Type: application/mercurial-exp-framing-0006\r\n
- s> Transfer-Encoding: chunked\r\n
- s> \r\n
- s> 11\r\n
- s> \t\x00\x00\x01\x00\x02\x01\x92Hidentity
- s> \r\n
- s> 13\r\n
- s> \x0b\x00\x00\x01\x00\x02\x041\xa1FstatusBok
- s> \r\n
- s> 27\r\n
- s> \x1f\x00\x00\x01\x00\x02\x041X\x1dcustomreadonly bytes response
- s> \r\n
- s> 8\r\n
- s> \x00\x00\x00\x01\x00\x02\x002
- s> \r\n
- s> 0\r\n
- s> \r\n
-
-Multiple requests to "multirequest" URL are allowed
-
- $ sendhttpraw << EOF
- > httprequest POST api/$HTTPV2/ro/multirequest
- > accept: $MEDIATYPE
- > content-type: $MEDIATYPE
- > user-agent: test
- > frame 1 1 stream-begin command-request new cbor:{b'name': b'customreadonly'}
- > frame 3 1 0 command-request new cbor:{b'name': b'customreadonly'}
- > EOF
- using raw connection to peer
- s> setsockopt(6, 1, 1) -> None (?)
- s> POST /api/exp-http-v2-0003/ro/multirequest HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> *\r\n (glob)
- s> *\r\n (glob)
- s> user-agent: test\r\n
- s> content-length: 58\r\n
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> \r\n
- s> \x15\x00\x00\x01\x00\x01\x01\x11\xa1DnameNcustomreadonly\x15\x00\x00\x03\x00\x01\x00\x11\xa1DnameNcustomreadonly
- s> makefile('rb', None)
- s> HTTP/1.1 200 OK\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Content-Type: application/mercurial-exp-framing-0006\r\n
- s> Transfer-Encoding: chunked\r\n
- s> \r\n
- s> 11\r\n
- s> \t\x00\x00\x01\x00\x02\x01\x92Hidentity
- s> \r\n
- s> 13\r\n
- s> \x0b\x00\x00\x01\x00\x02\x041\xa1FstatusBok
- s> \r\n
- s> 27\r\n
- s> \x1f\x00\x00\x01\x00\x02\x041X\x1dcustomreadonly bytes response
- s> \r\n
- s> 8\r\n
- s> \x00\x00\x00\x01\x00\x02\x002
- s> \r\n
- s> 13\r\n
- s> \x0b\x00\x00\x03\x00\x02\x041\xa1FstatusBok
- s> \r\n
- s> 27\r\n
- s> \x1f\x00\x00\x03\x00\x02\x041X\x1dcustomreadonly bytes response
- s> \r\n
- s> 8\r\n
- s> \x00\x00\x00\x03\x00\x02\x002
- s> \r\n
- s> 0\r\n
- s> \r\n
-
-Interleaved requests to "multirequest" are processed
-
- $ sendhttpraw << EOF
- > httprequest POST api/$HTTPV2/ro/multirequest
- > accept: $MEDIATYPE
- > content-type: $MEDIATYPE
- > user-agent: test
- > frame 1 1 stream-begin command-request new|more \xa2Dargs\xa1Inamespace
- > frame 3 1 0 command-request new|more \xa2Dargs\xa1Inamespace
- > frame 3 1 0 command-request continuation JnamespacesDnameHlistkeys
- > frame 1 1 0 command-request continuation IbookmarksDnameHlistkeys
- > EOF
- using raw connection to peer
- s> setsockopt(6, 1, 1) -> None (?)
- s> POST /api/exp-http-v2-0003/ro/multirequest HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> accept: application/mercurial-exp-framing-0006\r\n
- s> content-type: application/mercurial-exp-framing-0006\r\n
- s> user-agent: test\r\n
- s> content-length: 115\r\n
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> \r\n
- s> \x11\x00\x00\x01\x00\x01\x01\x15\xa2Dargs\xa1Inamespace\x11\x00\x00\x03\x00\x01\x00\x15\xa2Dargs\xa1Inamespace\x19\x00\x00\x03\x00\x01\x00\x12JnamespacesDnameHlistkeys\x18\x00\x00\x01\x00\x01\x00\x12IbookmarksDnameHlistkeys
- s> makefile('rb', None)
- s> HTTP/1.1 200 OK\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Content-Type: application/mercurial-exp-framing-0006\r\n
- s> Transfer-Encoding: chunked\r\n
- s> \r\n
- s> 11\r\n
- s> \t\x00\x00\x03\x00\x02\x01\x92Hidentity
- s> \r\n
- s> 13\r\n
- s> \x0b\x00\x00\x03\x00\x02\x041\xa1FstatusBok
- s> \r\n
- s> 28\r\n
- s> \x00\x00\x03\x00\x02\x041\xa3Ibookmarks@Jnamespaces@Fphases@
- s> \r\n
- s> 8\r\n
- s> \x00\x00\x00\x03\x00\x02\x002
- s> \r\n
- s> 13\r\n
- s> \x0b\x00\x00\x01\x00\x02\x041\xa1FstatusBok
- s> \r\n
- s> 9\r\n
- s> \x01\x00\x00\x01\x00\x02\x041\xa0
- s> \r\n
- s> 8\r\n
- s> \x00\x00\x00\x01\x00\x02\x002
- s> \r\n
- s> 0\r\n
- s> \r\n
-
-Restart server to disable read-write access
-
- $ killdaemons.py
- $ cat > server/.hg/hgrc << EOF
- > [experimental]
- > web.apiserver = true
- > web.api.debugreflect = true
- > web.api.http-v2 = true
- > [web]
- > push_ssl = false
- > EOF
-
- $ hg -R server serve -p $HGPORT -d --pid-file hg.pid -E error.log
- $ cat hg.pid > $DAEMON_PIDS
-
-Attempting to run a read-write command via multirequest on read-only URL is not allowed
-
- $ sendhttpraw << EOF
- > httprequest POST api/$HTTPV2/ro/multirequest
- > accept: $MEDIATYPE
- > content-type: $MEDIATYPE
- > user-agent: test
- > frame 1 1 stream-begin command-request new cbor:{b'name': b'pushkey'}
- > EOF
- using raw connection to peer
- s> setsockopt(6, 1, 1) -> None (?)
- s> POST /api/exp-http-v2-0003/ro/multirequest HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> accept: application/mercurial-exp-framing-0006\r\n
- s> content-type: application/mercurial-exp-framing-0006\r\n
- s> user-agent: test\r\n
- s> content-length: 22\r\n
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> \r\n
- s> \x0e\x00\x00\x01\x00\x01\x01\x11\xa1DnameGpushkey
- s> makefile('rb', None)
- s> HTTP/1.1 403 Forbidden\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Content-Type: text/plain\r\n
- s> Content-Length: 52\r\n
- s> \r\n
- s> insufficient permissions to execute command: pushkey
-
-Defining an invalid content encoding results in warning
-
- $ hg --config experimental.httppeer.v2-encoder-order=identity,badencoder --verbose debugwireproto --nologhandshake --peer http2 http://$LOCALIP:$HGPORT/ << EOF
- > command heads
- > EOF
- creating http peer for wire protocol version 2
- sending heads command
- wire protocol version 2 encoder referenced in config (badencoder) is not known; ignoring
- s> setsockopt(6, 1, 1) -> None (?)
- s> POST /api/exp-http-v2-0003/ro/heads HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> accept: application/mercurial-exp-framing-0006\r\n
- s> content-type: application/mercurial-exp-framing-0006\r\n
- s> content-length: 56\r\n
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> user-agent: Mercurial debugwireproto\r\n
- s> \r\n
- s> \x1c\x00\x00\x01\x00\x01\x01\x82\xa1Pcontentencodings\x81Hidentity\x0c\x00\x00\x01\x00\x01\x00\x11\xa1DnameEheads
- s> makefile('rb', None)
- s> HTTP/1.1 200 OK\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Content-Type: application/mercurial-exp-framing-0006\r\n
- s> Transfer-Encoding: chunked\r\n
- s> \r\n
- s> 11\r\n
- s> \t\x00\x00\x01\x00\x02\x01\x92
- s> Hidentity
- s> \r\n
- s> 13\r\n
- s> \x0b\x00\x00\x01\x00\x02\x041
- s> \xa1FstatusBok
- s> \r\n
- s> 1e\r\n
- s> \x16\x00\x00\x01\x00\x02\x041
- s> \x81T\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00
- s> \r\n
- s> 8\r\n
- s> \x00\x00\x00\x01\x00\x02\x002
- s> \r\n
- s> 0\r\n
- s> \r\n
- response: [
- b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
- ]
- (sent 2 HTTP requests and * bytes; received * bytes in responses) (glob)
-
-#if zstd
-
- $ hg --verbose debugwireproto --nologhandshake --peer http2 http://$LOCALIP:$HGPORT/ << EOF
- > command heads
- > EOF
- creating http peer for wire protocol version 2
- sending heads command
- s> setsockopt(6, 1, 1) -> None (?)
- s> POST /api/exp-http-v2-0003/ro/heads HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> accept: application/mercurial-exp-framing-0006\r\n
- s> content-type: application/mercurial-exp-framing-0006\r\n
- s> content-length: 70\r\n
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> user-agent: Mercurial debugwireproto\r\n
- s> \r\n
- s> *\x00\x00\x01\x00\x01\x01\x82\xa1Pcontentencodings\x83Hzstd-8mbDzlibHidentity\x0c\x00\x00\x01\x00\x01\x00\x11\xa1DnameEheads
- s> makefile('rb', None)
- s> HTTP/1.1 200 OK\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Content-Type: application/mercurial-exp-framing-0006\r\n
- s> Transfer-Encoding: chunked\r\n
- s> \r\n
- s> 11\r\n
- s> \t\x00\x00\x01\x00\x02\x01\x92
- s> Hzstd-8mb
- s> \r\n
- s> 25\r\n
- s> \x1d\x00\x00\x01\x00\x02\x042
- s> (\xb5/\xfd\x00X\xa4\x00\x00p\xa1FstatusBok\x81T\x00\x01\x00\tP\x02
- s> \r\n
- s> 0\r\n
- s> \r\n
- response: [
- b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
- ]
- (sent 2 HTTP requests and * bytes; received * bytes in responses) (glob)
-
-#endif
-
- $ cat error.log
--- a/tests/test-http-api.t Fri Feb 18 12:55:39 2022 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,299 +0,0 @@
-#require no-chg
-
- $ send() {
- > hg --verbose debugwireproto --peer raw http://$LOCALIP:$HGPORT/
- > }
-
- $ hg init server
- $ hg -R server serve -p $HGPORT -d --pid-file hg.pid
- $ cat hg.pid > $DAEMON_PIDS
-
-Request to /api fails unless web.apiserver is enabled
-
- $ get-with-headers.py $LOCALIP:$HGPORT api
- 400 no such method: api
-
- <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
- <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en-US">
- <head>
- <link rel="icon" href="/static/hgicon.png" type="image/png" />
- <meta name="robots" content="index, nofollow" />
- <link rel="stylesheet" href="/static/style-paper.css" type="text/css" />
- <script type="text/javascript" src="/static/mercurial.js"></script>
-
- <title>$TESTTMP/server: error</title>
- </head>
- <body>
-
- <div class="container">
- <div class="menu">
- <div class="logo">
- <a href="https://mercurial-scm.org/">
- <img src="/static/hglogo.png" width=75 height=90 border=0 alt="mercurial" /></a>
- </div>
- <ul>
- <li><a href="/shortlog">log</a></li>
- <li><a href="/graph">graph</a></li>
- <li><a href="/tags">tags</a></li>
- <li><a href="/bookmarks">bookmarks</a></li>
- <li><a href="/branches">branches</a></li>
- </ul>
- <ul>
- <li><a href="/help">help</a></li>
- </ul>
- </div>
-
- <div class="main">
-
- <h2 class="breadcrumb"><a href="/">Mercurial</a> </h2>
- <h3>error</h3>
-
-
- <form class="search" action="/log">
-
- <p><input name="rev" id="search1" type="text" size="30" value="" /></p>
- <div id="hint">Find changesets by keywords (author, files, the commit message), revision
- number or hash, or <a href="/help/revsets">revset expression</a>.</div>
- </form>
-
- <div class="description">
- <p>
- An error occurred while processing your request:
- </p>
- <p>
- no such method: api
- </p>
- </div>
- </div>
- </div>
-
-
-
- </body>
- </html>
-
- [1]
-
- $ get-with-headers.py $LOCALIP:$HGPORT api/
- 400 no such method: api
-
- <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
- <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en-US">
- <head>
- <link rel="icon" href="/static/hgicon.png" type="image/png" />
- <meta name="robots" content="index, nofollow" />
- <link rel="stylesheet" href="/static/style-paper.css" type="text/css" />
- <script type="text/javascript" src="/static/mercurial.js"></script>
-
- <title>$TESTTMP/server: error</title>
- </head>
- <body>
-
- <div class="container">
- <div class="menu">
- <div class="logo">
- <a href="https://mercurial-scm.org/">
- <img src="/static/hglogo.png" width=75 height=90 border=0 alt="mercurial" /></a>
- </div>
- <ul>
- <li><a href="/shortlog">log</a></li>
- <li><a href="/graph">graph</a></li>
- <li><a href="/tags">tags</a></li>
- <li><a href="/bookmarks">bookmarks</a></li>
- <li><a href="/branches">branches</a></li>
- </ul>
- <ul>
- <li><a href="/help">help</a></li>
- </ul>
- </div>
-
- <div class="main">
-
- <h2 class="breadcrumb"><a href="/">Mercurial</a> </h2>
- <h3>error</h3>
-
-
- <form class="search" action="/log">
-
- <p><input name="rev" id="search1" type="text" size="30" value="" /></p>
- <div id="hint">Find changesets by keywords (author, files, the commit message), revision
- number or hash, or <a href="/help/revsets">revset expression</a>.</div>
- </form>
-
- <div class="description">
- <p>
- An error occurred while processing your request:
- </p>
- <p>
- no such method: api
- </p>
- </div>
- </div>
- </div>
-
-
-
- </body>
- </html>
-
- [1]
-
-Restart server with support for API server
-
- $ killdaemons.py
- $ cat > server/.hg/hgrc << EOF
- > [experimental]
- > web.apiserver = true
- > EOF
-
- $ hg -R server serve -p $HGPORT -d --pid-file hg.pid
- $ cat hg.pid > $DAEMON_PIDS
-
-/api lists available APIs (empty since none are available by default)
-
- $ send << EOF
- > httprequest GET api
- > user-agent: test
- > EOF
- using raw connection to peer
- s> setsockopt(6, 1, 1) -> None (?)
- s> GET /api HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> user-agent: test\r\n
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> \r\n
- s> makefile('rb', None)
- s> HTTP/1.1 200 OK\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Content-Type: text/plain\r\n
- s> Content-Length: 100\r\n
- s> \r\n
- s> APIs can be accessed at /api/<name>, where <name> can be one of the following:\n
- s> \n
- s> (no available APIs)\n
-
- $ send << EOF
- > httprequest GET api/
- > user-agent: test
- > EOF
- using raw connection to peer
- s> setsockopt(6, 1, 1) -> None (?)
- s> GET /api/ HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> user-agent: test\r\n
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> \r\n
- s> makefile('rb', None)
- s> HTTP/1.1 200 OK\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Content-Type: text/plain\r\n
- s> Content-Length: 100\r\n
- s> \r\n
- s> APIs can be accessed at /api/<name>, where <name> can be one of the following:\n
- s> \n
- s> (no available APIs)\n
-
-Accessing an unknown API yields a 404
-
- $ send << EOF
- > httprequest GET api/unknown
- > user-agent: test
- > EOF
- using raw connection to peer
- s> setsockopt(6, 1, 1) -> None (?)
- s> GET /api/unknown HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> user-agent: test\r\n
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> \r\n
- s> makefile('rb', None)
- s> HTTP/1.1 404 Not Found\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Content-Type: text/plain\r\n
- s> Content-Length: 33\r\n
- s> \r\n
- s> Unknown API: unknown\n
- s> Known APIs:
-
-Accessing a known but not enabled API yields a different error
-
- $ send << EOF
- > httprequest GET api/exp-http-v2-0003
- > user-agent: test
- > EOF
- using raw connection to peer
- s> setsockopt(6, 1, 1) -> None (?)
- s> GET /api/exp-http-v2-0003 HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> user-agent: test\r\n
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> \r\n
- s> makefile('rb', None)
- s> HTTP/1.1 404 Not Found\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Content-Type: text/plain\r\n
- s> Content-Length: 33\r\n
- s> \r\n
- s> API exp-http-v2-0003 not enabled\n
-
-Restart server with support for HTTP v2 API
-
- $ killdaemons.py
- $ cat > server/.hg/hgrc << EOF
- > [experimental]
- > web.apiserver = true
- > web.api.http-v2 = true
- > EOF
-
- $ hg -R server serve -p $HGPORT -d --pid-file hg.pid
- $ cat hg.pid > $DAEMON_PIDS
-
-/api lists the HTTP v2 protocol as available
-
- $ send << EOF
- > httprequest GET api
- > user-agent: test
- > EOF
- using raw connection to peer
- s> setsockopt(6, 1, 1) -> None (?)
- s> GET /api HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> user-agent: test\r\n
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> \r\n
- s> makefile('rb', None)
- s> HTTP/1.1 200 OK\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Content-Type: text/plain\r\n
- s> Content-Length: 96\r\n
- s> \r\n
- s> APIs can be accessed at /api/<name>, where <name> can be one of the following:\n
- s> \n
- s> exp-http-v2-0003
-
- $ send << EOF
- > httprequest GET api/
- > user-agent: test
- > EOF
- using raw connection to peer
- s> setsockopt(6, 1, 1) -> None (?)
- s> GET /api/ HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> user-agent: test\r\n
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> \r\n
- s> makefile('rb', None)
- s> HTTP/1.1 200 OK\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Content-Type: text/plain\r\n
- s> Content-Length: 96\r\n
- s> \r\n
- s> APIs can be accessed at /api/<name>, where <name> can be one of the following:\n
- s> \n
- s> exp-http-v2-0003
--- a/tests/test-http-bad-server.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-http-bad-server.t Fri Feb 18 14:27:43 2022 +0100
@@ -30,14 +30,15 @@
$ cat > .hg/hgrc << EOF
> [extensions]
- > badserver = $TESTDIR/badserverext.py
+ > badserver = $TESTDIR/testlib/badserverext.py
> [server]
> compressionengines = none
> EOF
Failure to accept() socket should result in connection related error message
+----------------------------------------------------------------------------
- $ hg serve --config badserver.closebeforeaccept=true -p $HGPORT -d --pid-file=hg.pid
+ $ hg serve --config badserver.close-before-accept=true -p $HGPORT -d --pid-file=hg.pid
$ cat hg.pid > $DAEMON_PIDS
$ hg clone http://localhost:$HGPORT/ clone
@@ -50,8 +51,9 @@
$ killdaemons.py $DAEMON_PIDS
Failure immediately after accept() should yield connection related error message
+--------------------------------------------------------------------------------
- $ hg serve --config badserver.closeafteraccept=true -p $HGPORT -d --pid-file=hg.pid
+ $ hg serve --config badserver.close-after-accept=true -p $HGPORT -d --pid-file=hg.pid
$ cat hg.pid > $DAEMON_PIDS
TODO: this usually outputs good results, but sometimes emits abort:
@@ -69,8 +71,9 @@
$ killdaemons.py $DAEMON_PIDS
Failure to read all bytes in initial HTTP request should yield connection related error message
+-----------------------------------------------------------------------------------------------
- $ hg serve --config badserver.closeafterrecvbytes=1 -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ hg serve --config badserver.close-after-recv-bytes=1 -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
$ hg clone http://localhost:$HGPORT/ clone
@@ -80,31 +83,18 @@
$ killdaemons.py $DAEMON_PIDS
$ cat error.log
- readline(1 from 65537) -> (1) G
+ readline(1 from ~) -> (1) G
read limit reached; closing socket
$ rm -f error.log
Same failure, but server reads full HTTP request line
-
- $ hg serve --config badserver.closeafterrecvbytes=40 -p $HGPORT -d --pid-file=hg.pid -E error.log
- $ cat hg.pid > $DAEMON_PIDS
- $ hg clone http://localhost:$HGPORT/ clone
- abort: error: bad HTTP status line: * (glob)
- [100]
-
- $ killdaemons.py $DAEMON_PIDS
+-----------------------------------------------------
- $ cat error.log
- readline(40 from 65537) -> (33) GET /?cmd=capabilities HTTP/1.1\r\n
- readline(7 from *) -> (7) Accept- (glob)
- read limit reached; closing socket
-
- $ rm -f error.log
-
-Failure on subsequent HTTP request on the same socket (cmd?batch)
-
- $ hg serve --config badserver.closeafterrecvbytes=210,223 -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ hg serve \
+ > --config badserver.close-after-recv-patterns="GET /\?cmd=capabilities" \
+ > --config badserver.close-after-recv-bytes=7 \
+ > -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
$ hg clone http://localhost:$HGPORT/ clone
abort: error: bad HTTP status line: * (glob)
@@ -113,40 +103,65 @@
$ killdaemons.py $DAEMON_PIDS
$ cat error.log
- readline(210 from 65537) -> (33) GET /?cmd=capabilities HTTP/1.1\r\n
- readline(177 from *) -> (27) Accept-Encoding: identity\r\n (glob)
- readline(150 from *) -> (35) accept: application/mercurial-0.1\r\n (glob)
- readline(115 from *) -> (*) host: localhost:$HGPORT\r\n (glob)
- readline(* from *) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
- readline(* from *) -> (2) \r\n (glob)
- sendall(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 431\r\n\r\n (py36 !)
- sendall(431) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py36 !)
- write(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 431\r\n\r\n (py3 no-py36 !)
- write(431) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py3 no-py36 !)
+ readline(~) -> (33) GET /?cmd=capabilities HTTP/1.1\r\n
+ readline(7 from *) -> (7) Accept- (glob)
+ read limit reached; closing socket
+
+ $ rm -f error.log
+
+Failure on subsequent HTTP request on the same socket (cmd?batch)
+-----------------------------------------------------------------
+
+ $ hg serve \
+ > --config badserver.close-after-recv-patterns="GET /\?cmd=batch,GET /\?cmd=batch" \
+ > --config badserver.close-after-recv-bytes=15,197 \
+ > -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ cat hg.pid > $DAEMON_PIDS
+ $ hg clone http://localhost:$HGPORT/ clone
+ abort: error: bad HTTP status line: * (glob)
+ [100]
+
+ $ killdaemons.py $DAEMON_PIDS
+
+ $ cat error.log
+ readline(~) -> (33) GET /?cmd=capabilities HTTP/1.1\r\n
+ readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
+ readline(*) -> (35) accept: application/mercurial-0.1\r\n (glob)
+ readline(*) -> (*) host: localhost:$HGPORT\r\n (glob)
+ readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
+ readline(*) -> (2) \r\n (glob)
+ sendall(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py36 !)
+ sendall(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (py36 !)
+ write(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py3 no-py36 !)
+ write(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (py3 no-py36 !)
write(36) -> HTTP/1.1 200 Script output follows\r\n (no-py3 !)
write(23) -> Server: badhttpserver\r\n (no-py3 !)
write(37) -> Date: $HTTP_DATE$\r\n (no-py3 !)
write(41) -> Content-Type: application/mercurial-0.1\r\n (no-py3 !)
- write(21) -> Content-Length: 431\r\n (no-py3 !)
+ write(21) -> Content-Length: *\r\n (glob) (no-py3 !)
write(2) -> \r\n (no-py3 !)
- write(431) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (no-py3 !)
- readline(4? from 65537) -> (26) GET /?cmd=batch HTTP/1.1\r\n (glob)
- readline(1? from *) -> (1?) Accept-Encoding* (glob)
+ write(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (no-py3 !)
+ readline(~) -> (26) GET /?cmd=batch HTTP/1.1\r\n (glob)
+ readline(*) -> (1?) Accept-Encoding* (glob)
read limit reached; closing socket
- readline(223 from 65537) -> (26) GET /?cmd=batch HTTP/1.1\r\n
- readline(197 from *) -> (27) Accept-Encoding: identity\r\n (glob)
- readline(170 from *) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
- readline(141 from *) -> (41) x-hgarg-1: cmds=heads+%3Bknown+nodes%3D\r\n (glob)
- readline(100 from *) -> (61) x-hgproto-1: 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull\r\n (glob)
- readline(39 from *) -> (35) accept: application/mercurial-0.1\r\n (glob)
+ readline(~) -> (26) GET /?cmd=batch HTTP/1.1\r\n
+ readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
+ readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
+ readline(*) -> (41) x-hgarg-1: cmds=heads+%3Bknown+nodes%3D\r\n (glob)
+ readline(*) -> (61) x-hgproto-1: 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull\r\n (glob)
+ readline(*) -> (35) accept: application/mercurial-0.1\r\n (glob)
readline(4 from *) -> (4) host (glob)
read limit reached; closing socket
$ rm -f error.log
Failure to read getbundle HTTP request
+--------------------------------------
- $ hg serve --config badserver.closeafterrecvbytes=308,317,304 -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ hg serve \
+ > --config badserver.close-after-recv-patterns="GET /\?cmd=batch,user-agent: mercurial/proto-1.0,GET /\?cmd=getbundle" \
+ > --config badserver.close-after-recv-bytes=110,26,274 \
+ > -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
$ hg clone http://localhost:$HGPORT/ clone
requesting all changes
@@ -158,38 +173,38 @@
$ cat error.log
readline(1 from -1) -> (1) x (?)
readline(1 from -1) -> (1) x (?)
- readline(308 from 65537) -> (33) GET /?cmd=capabilities HTTP/1.1\r\n
- readline(275 from *) -> (27) Accept-Encoding: identity\r\n (glob)
- readline(248 from *) -> (35) accept: application/mercurial-0.1\r\n (glob)
- readline(213 from *) -> (*) host: localhost:$HGPORT\r\n (glob)
- readline(* from *) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
- readline(* from *) -> (2) \r\n (glob)
- sendall(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 431\r\n\r\n (py36 !)
- sendall(431) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py36 !)
- write(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 431\r\n\r\n (py3 no-py36 !)
- write(431) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py3 no-py36 !)
+ readline(~) -> (33) GET /?cmd=capabilities HTTP/1.1\r\n
+ readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
+ readline(*) -> (35) accept: application/mercurial-0.1\r\n (glob)
+ readline(*) -> (*) host: localhost:$HGPORT\r\n (glob)
+ readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
+ readline(*) -> (2) \r\n (glob)
+ sendall(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py36 !)
+ sendall(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (py36 !)
+ write(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py3 no-py36 !)
+ write(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (py3 no-py36 !)
write(36) -> HTTP/1.1 200 Script output follows\r\n (no-py3 !)
write(23) -> Server: badhttpserver\r\n (no-py3 !)
write(37) -> Date: $HTTP_DATE$\r\n (no-py3 !)
write(41) -> Content-Type: application/mercurial-0.1\r\n (no-py3 !)
- write(21) -> Content-Length: 431\r\n (no-py3 !)
+ write(21) -> Content-Length: *\r\n (glob) (no-py3 !)
write(2) -> \r\n (no-py3 !)
- write(431) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (no-py3 !)
- readline(13? from 65537) -> (26) GET /?cmd=batch HTTP/1.1\r\n (glob)
- readline(1?? from *) -> (27) Accept-Encoding: identity\r\n (glob)
- readline(8? from *) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
- readline(5? from *) -> (41) x-hgarg-1: cmds=heads+%3Bknown+nodes%3D\r\n (glob)
- readline(1? from *) -> (1?) x-hgproto-1:* (glob)
+ write(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (no-py3 !)
+ readline(~) -> (26) GET /?cmd=batch HTTP/1.1\r\n (glob)
+ readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
+ readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
+ readline(*) -> (41) x-hgarg-1: cmds=heads+%3Bknown+nodes%3D\r\n (glob)
+ readline(*) -> (1?) x-hgproto-1:* (glob)
read limit reached; closing socket
- readline(317 from 65537) -> (26) GET /?cmd=batch HTTP/1.1\r\n
- readline(291 from *) -> (27) Accept-Encoding: identity\r\n (glob)
- readline(264 from *) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
- readline(235 from *) -> (41) x-hgarg-1: cmds=heads+%3Bknown+nodes%3D\r\n (glob)
- readline(194 from *) -> (61) x-hgproto-1: 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull\r\n (glob)
- readline(133 from *) -> (35) accept: application/mercurial-0.1\r\n (glob)
- readline(98 from *) -> (*) host: localhost:$HGPORT\r\n (glob)
- readline(* from *) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
- readline(* from *) -> (2) \r\n (glob)
+ readline(~) -> (26) GET /?cmd=batch HTTP/1.1\r\n
+ readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
+ readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
+ readline(*) -> (41) x-hgarg-1: cmds=heads+%3Bknown+nodes%3D\r\n (glob)
+ readline(*) -> (61) x-hgproto-1: 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull\r\n (glob)
+ readline(*) -> (35) accept: application/mercurial-0.1\r\n (glob)
+ readline(*) -> (*) host: localhost:$HGPORT\r\n (glob)
+ readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
+ readline(*) -> (2) \r\n (glob)
sendall(159) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\n (py36 !)
sendall(42) -> 96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n; (py36 !)
write(159) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\n (py3 no-py36 !)
@@ -201,9 +216,9 @@
write(20) -> Content-Length: 42\r\n (no-py3 !)
write(2) -> \r\n (no-py3 !)
write(42) -> 96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n; (no-py3 !)
- readline(* from 65537) -> (*) GET /?cmd=getbundle HTTP* (glob)
+ readline(24 from ~) -> (*) GET /?cmd=getbundle HTTP* (glob)
read limit reached; closing socket
- readline(304 from 65537) -> (30) GET /?cmd=getbundle HTTP/1.1\r\n
+ readline(~) -> (30) GET /?cmd=getbundle HTTP/1.1\r\n
readline(274 from *) -> (27) Accept-Encoding: identity\r\n (glob)
readline(247 from *) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
readline(218 from *) -> (218) x-hgarg-1: bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtag (glob)
@@ -212,8 +227,13 @@
$ rm -f error.log
Now do a variation using POST to send arguments
+===============================================
- $ hg serve --config experimental.httppostargs=true --config badserver.closeafterrecvbytes=329,344 -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ hg serve \
+ > --config badserver.close-after-recv-patterns="x-hgargs-post:,user-agent: mercurial/proto-1.0" \
+ > --config badserver.close-after-recv-bytes="14,26" \
+ > --config experimental.httppostargs=true \
+ > -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
$ hg clone http://localhost:$HGPORT/ clone
@@ -223,57 +243,58 @@
$ killdaemons.py $DAEMON_PIDS
$ cat error.log | "$PYTHON" $TESTDIR/filtertraceback.py
- readline(329 from 65537) -> (33) GET /?cmd=capabilities HTTP/1.1\r\n
- readline(296 from *) -> (27) Accept-Encoding: identity\r\n (glob)
- readline(269 from *) -> (35) accept: application/mercurial-0.1\r\n (glob)
- readline(234 from *) -> (2?) host: localhost:$HGPORT\r\n (glob)
- readline(* from *) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
- readline(* from *) -> (2) \r\n (glob)
- sendall(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 444\r\n\r\n (py36 !)
- sendall(444) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx httppostargs known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py36 !)
- write(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 444\r\n\r\n (py3 no-py36 !)
- write(444) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx httppostargs known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py3 no-py36 !)
+ readline(~) -> (33) GET /?cmd=capabilities HTTP/1.1\r\n
+ readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
+ readline(*) -> (35) accept: application/mercurial-0.1\r\n (glob)
+ readline(*) -> (2?) host: localhost:$HGPORT\r\n (glob)
+ readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
+ readline(*) -> (2) \r\n (glob)
+ sendall(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py36 !)
+ sendall(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx httppostargs known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (py36 !)
+ write(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py3 no-py36 !)
+ write(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx httppostargs known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (py3 no-py36 !)
write(36) -> HTTP/1.1 200 Script output follows\r\n (no-py3 !)
write(23) -> Server: badhttpserver\r\n (no-py3 !)
write(37) -> Date: $HTTP_DATE$\r\n (no-py3 !)
write(41) -> Content-Type: application/mercurial-0.1\r\n (no-py3 !)
- write(21) -> Content-Length: 444\r\n (no-py3 !)
+ write(21) -> Content-Length: *\r\n (glob) (no-py3 !)
write(2) -> \r\n (no-py3 !)
- write(444) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx httppostargs known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (no-py3 !)
- readline(1?? from 65537) -> (27) POST /?cmd=batch HTTP/1.1\r\n (glob)
- readline(1?? from *) -> (27) Accept-Encoding: identity\r\n (glob)
- readline(1?? from *) -> (41) content-type: application/mercurial-0.1\r\n (glob)
- readline(6? from *) -> (33) vary: X-HgArgs-Post,X-HgProto-1\r\n (glob)
- readline(3? from *) -> (19) x-hgargs-post: 28\r\n (glob)
- readline(1? from *) -> (1?) x-hgproto-1: * (glob)
+ write(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx httppostargs known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (no-py3 !)
+ readline(~) -> (27) POST /?cmd=batch HTTP/1.1\r\n (glob)
+ readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
+ readline(*) -> (41) content-type: application/mercurial-0.1\r\n (glob)
+ readline(*) -> (33) vary: X-HgArgs-Post,X-HgProto-1\r\n (glob)
+ readline(*) -> (19) x-hgargs-post: 28\r\n (glob)
+ readline(*) -> (1?) x-hgproto-1: * (glob)
read limit reached; closing socket
- readline(344 from 65537) -> (27) POST /?cmd=batch HTTP/1.1\r\n
- readline(317 from *) -> (27) Accept-Encoding: identity\r\n (glob)
- readline(290 from *) -> (41) content-type: application/mercurial-0.1\r\n (glob)
- readline(249 from *) -> (33) vary: X-HgArgs-Post,X-HgProto-1\r\n (glob)
- readline(216 from *) -> (19) x-hgargs-post: 28\r\n (glob)
- readline(197 from *) -> (61) x-hgproto-1: 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull\r\n (glob)
- readline(136 from *) -> (35) accept: application/mercurial-0.1\r\n (glob)
- readline(101 from *) -> (20) content-length: 28\r\n (glob)
- readline(81 from *) -> (*) host: localhost:$HGPORT\r\n (glob)
- readline(* from *) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
- readline(* from *) -> (2) \r\n (glob)
- read(* from 28) -> (*) cmds=* (glob)
- read limit reached, closing socket
+ readline(~) -> (27) POST /?cmd=batch HTTP/1.1\r\n
+ readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
+ readline(*) -> (41) content-type: application/mercurial-0.1\r\n (glob)
+ readline(*) -> (33) vary: X-HgArgs-Post,X-HgProto-1\r\n (glob)
+ readline(*) -> (19) x-hgargs-post: 28\r\n (glob)
+ readline(*) -> (61) x-hgproto-1: 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull\r\n (glob)
+ readline(*) -> (35) accept: application/mercurial-0.1\r\n (glob)
+ readline(*) -> (20) content-length: 28\r\n (glob)
+ readline(*) -> (*) host: localhost:$HGPORT\r\n (glob)
+ readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
+ readline(*) -> (2) \r\n (glob)
+ read(24 from 28) -> (*) cmds=* (glob)
+ read limit reached; closing socket
$LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=batch': (glob)
Traceback (most recent call last):
Exception: connection closed after receiving N bytes
write(126) -> HTTP/1.1 500 Internal Server Error\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nTransfer-Encoding: chunked\r\n\r\n (py3 no-py36 !)
- write(36) -> HTTP/1.1 500 Internal Server Error\r\n (no-py3 !)
$ rm -f error.log
Now move on to partial server responses
+=======================================
Server sends a single character from the HTTP response line
+-----------------------------------------------------------
- $ hg serve --config badserver.closeaftersendbytes=1 -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ hg serve --config badserver.close-after-send-bytes=1 -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
$ hg clone http://localhost:$HGPORT/ clone
@@ -283,7 +304,7 @@
$ killdaemons.py $DAEMON_PIDS
$ cat error.log | "$PYTHON" $TESTDIR/filtertraceback.py
- readline(65537) -> (33) GET /?cmd=capabilities HTTP/1.1\r\n
+ readline(~) -> (33) GET /?cmd=capabilities HTTP/1.1\r\n
readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
readline(*) -> (35) accept: application/mercurial-0.1\r\n (glob)
readline(*) -> (2?) host: localhost:$HGPORT\r\n (glob)
@@ -297,41 +318,43 @@
Traceback (most recent call last):
Exception: connection closed after sending N bytes
- write(286) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 431\r\n\r\nHTTP/1.1 500 Internal Server Error\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nTransfer-Encoding: chunked\r\n\r\n (py3 no-py36 !)
- write(36) -> HTTP/1.1 500 Internal Server Error\r\n (no-py3 !)
+ write(286) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\nHTTP/1.1 500 Internal Server Error\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nTransfer-Encoding: chunked\r\n\r\n (glob) (py3 no-py36 !)
$ rm -f error.log
Server sends an incomplete capabilities response body
+-----------------------------------------------------
- $ hg serve --config badserver.closeaftersendbytes=180 -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ hg serve \
+ > --config badserver.close-after-send-patterns='batch branchmap bund' \
+ > -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
$ hg clone http://localhost:$HGPORT/ clone
- abort: HTTP request error (incomplete response; expected 431 bytes got 20)
+ abort: HTTP request error (incomplete response; expected * bytes got 20) (glob)
(this may be an intermittent network failure; if the error persists, consider contacting the network or server operator)
[255]
$ killdaemons.py $DAEMON_PIDS
$ cat error.log | "$PYTHON" $TESTDIR/filtertraceback.py
- readline(65537) -> (33) GET /?cmd=capabilities HTTP/1.1\r\n
+ readline(~) -> (33) GET /?cmd=capabilities HTTP/1.1\r\n
readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
readline(*) -> (35) accept: application/mercurial-0.1\r\n (glob)
readline(*) -> (2?) host: localhost:$HGPORT\r\n (glob)
readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
readline(*) -> (2) \r\n (glob)
- sendall(160 from 160) -> (20) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 431\r\n\r\n (py36 !)
- sendall(20 from 431) -> (0) batch branchmap bund (py36 !)
- write(160 from 160) -> (20) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 431\r\n\r\n (py3 no-py36 !)
- write(20 from 431) -> (0) batch branchmap bund (py3 no-py36 !)
- write(36 from 36) -> (144) HTTP/1.1 200 Script output follows\r\n (no-py3 !)
- write(23 from 23) -> (121) Server: badhttpserver\r\n (no-py3 !)
- write(37 from 37) -> (84) Date: $HTTP_DATE$\r\n (no-py3 !)
- write(41 from 41) -> (43) Content-Type: application/mercurial-0.1\r\n (no-py3 !)
- write(21 from 21) -> (22) Content-Length: 431\r\n (no-py3 !)
- write(2 from 2) -> (20) \r\n (no-py3 !)
- write(20 from 431) -> (0) batch branchmap bund (no-py3 !)
+ sendall(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py36 !)
+ sendall(20 from *) -> (0) batch branchmap bund (glob) (py36 !)
+ write(160) -> (20) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py3 no-py36 !)
+ write(20 from *) -> (0) batch branchmap bund (glob) (py3 no-py36 !)
+ write(36) -> HTTP/1.1 200 Script output follows\r\n (no-py3 !)
+ write(23) -> Server: badhttpserver\r\n (no-py3 !)
+ write(37) -> Date: $HTTP_DATE$\r\n (no-py3 !)
+ write(41) -> Content-Type: application/mercurial-0.1\r\n (no-py3 !)
+ write(21) -> Content-Length: *\r\n (glob) (no-py3 !)
+ write(2) -> \r\n (no-py3 !)
+ write(20 from *) -> (0) batch branchmap bund (glob) (no-py3 !)
write limit reached; closing socket
$LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=capabilities': (glob)
Traceback (most recent call last):
@@ -341,8 +364,11 @@
$ rm -f error.log
Server sends incomplete headers for batch request
+-------------------------------------------------
- $ hg serve --config badserver.closeaftersendbytes=709 -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ hg serve \
+ > --config badserver.close-after-send-patterns='(.*Content-Type: applicat){2}' \
+ > -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
TODO this output is horrible
@@ -358,24 +384,24 @@
$ killdaemons.py $DAEMON_PIDS
$ cat error.log | "$PYTHON" $TESTDIR/filtertraceback.py
- readline(65537) -> (33) GET /?cmd=capabilities HTTP/1.1\r\n
+ readline(~) -> (33) GET /?cmd=capabilities HTTP/1.1\r\n
readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
readline(*) -> (35) accept: application/mercurial-0.1\r\n (glob)
readline(*) -> (2?) host: localhost:$HGPORT\r\n (glob)
readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
readline(*) -> (2) \r\n (glob)
- sendall(160 from 160) -> (549) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 431\r\n\r\n (py36 !)
- sendall(431 from 431) -> (118) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py36 !)
- write(160 from 160) -> (568) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 431\r\n\r\n (py3 no-py36 !)
- write(431 from 431) -> (118) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py3 no-py36 !)
- write(36 from 36) -> (673) HTTP/1.1 200 Script output follows\r\n (no-py3 !)
- write(23 from 23) -> (650) Server: badhttpserver\r\n (no-py3 !)
- write(37 from 37) -> (613) Date: $HTTP_DATE$\r\n (no-py3 !)
- write(41 from 41) -> (572) Content-Type: application/mercurial-0.1\r\n (no-py3 !)
- write(21 from 21) -> (551) Content-Length: 431\r\n (no-py3 !)
- write(2 from 2) -> (549) \r\n (no-py3 !)
- write(431 from 431) -> (118) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (no-py3 !)
- readline(65537) -> (26) GET /?cmd=batch HTTP/1.1\r\n
+ sendall(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py36 !)
+ sendall(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (py36 !)
+ write(160) -> (568) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py3 no-py36 !)
+ write(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (py3 no-py36 !)
+ write(36) -> HTTP/1.1 200 Script output follows\r\n (no-py3 !)
+ write(23) -> Server: badhttpserver\r\n (no-py3 !)
+ write(37) -> Date: $HTTP_DATE$\r\n (no-py3 !)
+ write(41) -> Content-Type: application/mercurial-0.1\r\n (no-py3 !)
+ write(21) -> Content-Length: *\r\n (glob) (no-py3 !)
+ write(2) -> \r\n (no-py3 !)
+ write(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (no-py3 !)
+ readline(~) -> (26) GET /?cmd=batch HTTP/1.1\r\n
readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
readline(*) -> (41) x-hgarg-1: cmds=heads+%3Bknown+nodes%3D\r\n (glob)
@@ -386,9 +412,9 @@
readline(*) -> (2) \r\n (glob)
sendall(118 from 159) -> (0) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: applicat (py36 !)
write(118 from 159) -> (0) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: applicat (py3 no-py36 !)
- write(36 from 36) -> (82) HTTP/1.1 200 Script output follows\r\n (no-py3 !)
- write(23 from 23) -> (59) Server: badhttpserver\r\n (no-py3 !)
- write(37 from 37) -> (22) Date: $HTTP_DATE$\r\n (no-py3 !)
+ write(36) -> HTTP/1.1 200 Script output follows\r\n (no-py3 !)
+ write(23) -> Server: badhttpserver\r\n (no-py3 !)
+ write(37) -> Date: $HTTP_DATE$\r\n (no-py3 !)
write(22 from 41) -> (0) Content-Type: applicat (no-py3 !)
write limit reached; closing socket
$LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=batch': (glob)
@@ -396,13 +422,15 @@
Exception: connection closed after sending N bytes
write(285) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\nHTTP/1.1 500 Internal Server Error\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nTransfer-Encoding: chunked\r\n\r\n (py3 no-py36 !)
- write(36) -> HTTP/1.1 500 Internal Server Error\r\n (no-py3 !)
$ rm -f error.log
Server sends an incomplete HTTP response body to batch request
+--------------------------------------------------------------
- $ hg serve --config badserver.closeaftersendbytes=774 -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ hg serve \
+ > --config badserver.close-after-send-patterns=96ee1d7354c4ad7372047672 \
+ > -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
TODO client spews a stack due to uncaught ValueError in batch.results()
@@ -417,24 +445,24 @@
$ killdaemons.py $DAEMON_PIDS
$ cat error.log | "$PYTHON" $TESTDIR/filtertraceback.py
- readline(65537) -> (33) GET /?cmd=capabilities HTTP/1.1\r\n
+ readline(~) -> (33) GET /?cmd=capabilities HTTP/1.1\r\n
readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
readline(*) -> (35) accept: application/mercurial-0.1\r\n (glob)
readline(*) -> (2?) host: localhost:$HGPORT\r\n (glob)
readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
readline(*) -> (2) \r\n (glob)
- sendall(160 from 160) -> (614) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 431\r\n\r\n (py36 !)
- sendall(431 from 431) -> (183) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py36 !)
- write(160 from 160) -> (633) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 431\r\n\r\n (py3 no-py36 !)
- write(431 from 431) -> (183) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py3 no-py36 !)
- write(36 from 36) -> (738) HTTP/1.1 200 Script output follows\r\n (no-py3 !)
- write(23 from 23) -> (715) Server: badhttpserver\r\n (no-py3 !)
- write(37 from 37) -> (678) Date: $HTTP_DATE$\r\n (no-py3 !)
- write(41 from 41) -> (637) Content-Type: application/mercurial-0.1\r\n (no-py3 !)
- write(21 from 21) -> (616) Content-Length: 431\r\n (no-py3 !)
- write(2 from 2) -> (614) \r\n (no-py3 !)
- write(431 from 431) -> (183) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (no-py3 !)
- readline(65537) -> (26) GET /?cmd=batch HTTP/1.1\r\n
+ sendall(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py36 !)
+ sendall(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (py36 !)
+ write(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py3 no-py36 !)
+ write(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (py3 no-py36 !)
+ write(36) -> HTTP/1.1 200 Script output follows\r\n (no-py3 !)
+ write(23) -> Server: badhttpserver\r\n (no-py3 !)
+ write(37) -> Date: $HTTP_DATE$\r\n (no-py3 !)
+ write(41) -> Content-Type: application/mercurial-0.1\r\n (no-py3 !)
+ write(21) -> Content-Length: *\r\n (glob) (no-py3 !)
+ write(2) -> \r\n (no-py3 !)
+ write(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (no-py3 !)
+ readline(~) -> (26) GET /?cmd=batch HTTP/1.1\r\n
readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
readline(*) -> (41) x-hgarg-1: cmds=heads+%3Bknown+nodes%3D\r\n (glob)
@@ -443,16 +471,16 @@
readline(*) -> (2?) host: localhost:$HGPORT\r\n (glob)
readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
readline(*) -> (2) \r\n (glob)
- sendall(159 from 159) -> (24) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\n (py36 !)
+ sendall(159) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\n (py36 !)
sendall(24 from 42) -> (0) 96ee1d7354c4ad7372047672 (py36 !)
- write(159 from 159) -> (24) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\n (py3 no-py36 !)
+ write(159) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\n (py3 no-py36 !)
write(24 from 42) -> (0) 96ee1d7354c4ad7372047672 (py3 no-py36 !)
- write(36 from 36) -> (147) HTTP/1.1 200 Script output follows\r\n (no-py3 !)
- write(23 from 23) -> (124) Server: badhttpserver\r\n (no-py3 !)
- write(37 from 37) -> (87) Date: $HTTP_DATE$\r\n (no-py3 !)
- write(41 from 41) -> (46) Content-Type: application/mercurial-0.1\r\n (no-py3 !)
- write(20 from 20) -> (26) Content-Length: 42\r\n (no-py3 !)
- write(2 from 2) -> (24) \r\n (no-py3 !)
+ write(36) -> HTTP/1.1 200 Script output follows\r\n (no-py3 !)
+ write(23) -> Server: badhttpserver\r\n (no-py3 !)
+ write(37) -> Date: $HTTP_DATE$\r\n (no-py3 !)
+ write(41) -> Content-Type: application/mercurial-0.1\r\n (no-py3 !)
+ write(20) -> Content-Length: 42\r\n (no-py3 !)
+ write(2) -> \r\n (no-py3 !)
write(24 from 42) -> (0) 96ee1d7354c4ad7372047672 (no-py3 !)
write limit reached; closing socket
$LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=batch': (glob)
@@ -463,8 +491,11 @@
$ rm -f error.log
Server sends incomplete headers for getbundle response
+------------------------------------------------------
- $ hg serve --config badserver.closeaftersendbytes=921 -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ hg serve \
+ > --config badserver.close-after-send-patterns='(.*Content-Type: application/mercuri){3}' \
+ > -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
TODO this output is terrible
@@ -481,24 +512,24 @@
$ killdaemons.py $DAEMON_PIDS
$ cat error.log | "$PYTHON" $TESTDIR/filtertraceback.py
- readline(65537) -> (33) GET /?cmd=capabilities HTTP/1.1\r\n
+ readline(~) -> (33) GET /?cmd=capabilities HTTP/1.1\r\n
readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
readline(*) -> (35) accept: application/mercurial-0.1\r\n (glob)
readline(*) -> (2?) host: localhost:$HGPORT\r\n (glob)
readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
readline(*) -> (2) \r\n (glob)
- sendall(160 from 160) -> (761) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 431\r\n\r\n (py36 !)
- sendall(431 from 431) -> (330) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py36 !)
- write(160 from 160) -> (780) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 431\r\n\r\n (py3 no-py36 !)
- write(431 from 431) -> (330) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py3 no-py36 !)
- write(36 from 36) -> (885) HTTP/1.1 200 Script output follows\r\n (no-py3 !)
- write(23 from 23) -> (862) Server: badhttpserver\r\n (no-py3 !)
- write(37 from 37) -> (825) Date: $HTTP_DATE$\r\n (no-py3 !)
- write(41 from 41) -> (784) Content-Type: application/mercurial-0.1\r\n (no-py3 !)
- write(21 from 21) -> (763) Content-Length: 431\r\n (no-py3 !)
- write(2 from 2) -> (761) \r\n (no-py3 !)
- write(431 from 431) -> (330) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (no-py3 !)
- readline(65537) -> (26) GET /?cmd=batch HTTP/1.1\r\n
+ sendall(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py36 !)
+ sendall(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (py36 !)
+ write(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py3 no-py36 !)
+ write(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (py3 no-py36 !)
+ write(36) -> HTTP/1.1 200 Script output follows\r\n (no-py3 !)
+ write(23) -> Server: badhttpserver\r\n (no-py3 !)
+ write(37) -> Date: $HTTP_DATE$\r\n (no-py3 !)
+ write(41) -> Content-Type: application/mercurial-0.1\r\n (no-py3 !)
+ write(21) -> Content-Length: *\r\n (glob) (no-py3 !)
+ write(2) -> \r\n (no-py3 !)
+ write(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (no-py3 !)
+ readline(~) -> (26) GET /?cmd=batch HTTP/1.1\r\n
readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
readline(*) -> (41) x-hgarg-1: cmds=heads+%3Bknown+nodes%3D\r\n (glob)
@@ -507,18 +538,18 @@
readline(*) -> (2?) host: localhost:$HGPORT\r\n (glob)
readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
readline(*) -> (2) \r\n (glob)
- sendall(159 from 159) -> (171) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\n (py36 !)
- sendall(42 from 42) -> (129) 96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n; (py36 !)
- write(159 from 159) -> (171) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\n (py3 no-py36 !)
- write(42 from 42) -> (129) 96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n; (py3 no-py36 !)
- write(36 from 36) -> (294) HTTP/1.1 200 Script output follows\r\n (no-py3 !)
- write(23 from 23) -> (271) Server: badhttpserver\r\n (no-py3 !)
- write(37 from 37) -> (234) Date: $HTTP_DATE$\r\n (no-py3 !)
- write(41 from 41) -> (193) Content-Type: application/mercurial-0.1\r\n (no-py3 !)
- write(20 from 20) -> (173) Content-Length: 42\r\n (no-py3 !)
- write(2 from 2) -> (171) \r\n (no-py3 !)
- write(42 from 42) -> (129) 96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n; (no-py3 !)
- readline(65537) -> (30) GET /?cmd=getbundle HTTP/1.1\r\n
+ sendall(159) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\n (py36 !)
+ sendall(42) -> 96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n; (py36 !)
+ write(159) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\n (py3 no-py36 !)
+ write(42) -> 96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n; (py3 no-py36 !)
+ write(36) -> HTTP/1.1 200 Script output follows\r\n (no-py3 !)
+ write(23) -> Server: badhttpserver\r\n (no-py3 !)
+ write(37) -> Date: $HTTP_DATE$\r\n (no-py3 !)
+ write(41) -> Content-Type: application/mercurial-0.1\r\n (no-py3 !)
+ write(20) -> Content-Length: 42\r\n (no-py3 !)
+ write(2) -> \r\n (no-py3 !)
+ write(42) -> 96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n; (no-py3 !)
+ readline(~) -> (30) GET /?cmd=getbundle HTTP/1.1\r\n
readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
readline(*) -> (440) x-hgarg-1: bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=96ee1d7354c4ad7372047672c36a1f561e3a6a4c&listkeys=phases%2Cbookmarks\r\n (glob)
@@ -529,9 +560,9 @@
readline(*) -> (2) \r\n (glob)
sendall(129 from 167) -> (0) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercuri (py36 !)
write(129 from 167) -> (0) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercuri (py3 no-py36 !)
- write(36 from 36) -> (93) HTTP/1.1 200 Script output follows\r\n (no-py3 !)
- write(23 from 23) -> (70) Server: badhttpserver\r\n (no-py3 !)
- write(37 from 37) -> (33) Date: $HTTP_DATE$\r\n (no-py3 !)
+ write(36) -> HTTP/1.1 200 Script output follows\r\n (no-py3 !)
+ write(23) -> Server: badhttpserver\r\n (no-py3 !)
+ write(37) -> Date: $HTTP_DATE$\r\n (no-py3 !)
write(33 from 41) -> (0) Content-Type: application/mercuri (no-py3 !)
write limit reached; closing socket
$LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=getbundle': (glob)
@@ -539,13 +570,15 @@
Exception: connection closed after sending N bytes
write(293) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\nHTTP/1.1 500 Internal Server Error\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nTransfer-Encoding: chunked\r\n\r\n (py3 no-py36 !)
- write(36) -> HTTP/1.1 500 Internal Server Error\r\n (no-py3 !)
$ rm -f error.log
Server stops before it sends transfer encoding
+----------------------------------------------
- $ hg serve --config badserver.closeaftersendbytes=954 -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ hg serve \
+ > --config badserver.close-after-send-patterns="Transfer-Encoding: chunke" \
+ > -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
$ hg clone http://localhost:$HGPORT/ clone
@@ -556,25 +589,34 @@
$ killdaemons.py $DAEMON_PIDS
#if py36
- $ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -3
+ $ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -6
+ sendall(162 from 167) -> (0) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunke
+ write limit reached; closing socket
+ $LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=getbundle': (glob)
Traceback (most recent call last):
Exception: connection closed after sending N bytes
#else
- $ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -4
+ $ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -7
+ write(41) -> Content-Type: application/mercurial-0.2\r\n
+ write(25 from 28) -> (0) Transfer-Encoding: chunke
+ write limit reached; closing socket
+ $LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=getbundle': (glob)
Traceback (most recent call last):
Exception: connection closed after sending N bytes
+ write(293) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\nHTTP/1.1 500 Internal Server Error\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nTransfer-Encoding: chunked\r\n\r\n (py3 !)
- write(293) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\nHTTP/1.1 500 Internal Server Error\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nTransfer-Encoding: chunked\r\n\r\n (py3 !)
- write(36) -> HTTP/1.1 500 Internal Server Error\r\n (no-py3 !)
#endif
$ rm -f error.log
Server sends empty HTTP body for getbundle
+------------------------------------------
- $ hg serve --config badserver.closeaftersendbytes=959 -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ hg serve \
+ > --config badserver.close-after-send-patterns='Transfer-Encoding: chunked\r\n\r\n' \
+ > -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
$ hg clone http://localhost:$HGPORT/ clone
@@ -586,24 +628,24 @@
$ killdaemons.py $DAEMON_PIDS
$ cat error.log | "$PYTHON" $TESTDIR/filtertraceback.py
- readline(65537) -> (33) GET /?cmd=capabilities HTTP/1.1\r\n
+ readline(~) -> (33) GET /?cmd=capabilities HTTP/1.1\r\n
readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
readline(*) -> (35) accept: application/mercurial-0.1\r\n (glob)
readline(*) -> (2?) host: localhost:$HGPORT\r\n (glob)
readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
readline(*) -> (2) \r\n (glob)
- sendall(160 from 160) -> (799) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 431\r\n\r\n (py36 !)
- sendall(431 from 431) -> (368) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py36 !)
- write(160 from 160) -> (818) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 431\r\n\r\n (py3 no-py36 !)
- write(431 from 431) -> (368) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py3 no-py36 !)
- write(36 from 36) -> (923) HTTP/1.1 200 Script output follows\r\n (no-py3 !)
- write(23 from 23) -> (900) Server: badhttpserver\r\n (no-py3 !)
- write(37 from 37) -> (863) Date: $HTTP_DATE$\r\n (no-py3 !)
- write(41 from 41) -> (822) Content-Type: application/mercurial-0.1\r\n (no-py3 !)
- write(21 from 21) -> (801) Content-Length: 431\r\n (no-py3 !)
- write(2 from 2) -> (799) \r\n (no-py3 !)
- write(431 from 431) -> (368) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (no-py3 !)
- readline(65537) -> (26) GET /?cmd=batch HTTP/1.1\r\n
+ sendall(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py36 !)
+ sendall(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (py36 !)
+ write(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py3 no-py36 !)
+ write(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (py3 no-py36 !)
+ write(36) -> HTTP/1.1 200 Script output follows\r\n (no-py3 !)
+ write(23) -> Server: badhttpserver\r\n (no-py3 !)
+ write(37) -> Date: $HTTP_DATE$\r\n (no-py3 !)
+ write(41) -> Content-Type: application/mercurial-0.1\r\n (no-py3 !)
+ write(21) -> Content-Length: *\r\n (glob) (no-py3 !)
+ write(2) -> \r\n (no-py3 !)
+ write(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (no-py3 !)
+ readline(~) -> (26) GET /?cmd=batch HTTP/1.1\r\n
readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
readline(*) -> (41) x-hgarg-1: cmds=heads+%3Bknown+nodes%3D\r\n (glob)
@@ -612,18 +654,18 @@
readline(*) -> (2?) host: localhost:$HGPORT\r\n (glob)
readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
readline(*) -> (2) \r\n (glob)
- sendall(159 from 159) -> (209) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\n (py36 !)
- sendall(42 from 42) -> (167) 96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n; (py36 !)
- write(159 from 159) -> (209) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\n (py3 no-py36 !)
- write(42 from 42) -> (167) 96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n; (py3 no-py36 !)
- write(36 from 36) -> (332) HTTP/1.1 200 Script output follows\r\n (no-py3 !)
- write(23 from 23) -> (309) Server: badhttpserver\r\n (no-py3 !)
- write(37 from 37) -> (272) Date: $HTTP_DATE$\r\n (no-py3 !)
- write(41 from 41) -> (231) Content-Type: application/mercurial-0.1\r\n (no-py3 !)
- write(20 from 20) -> (211) Content-Length: 42\r\n (no-py3 !)
- write(2 from 2) -> (209) \r\n (no-py3 !)
- write(42 from 42) -> (167) 96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n; (no-py3 !)
- readline(65537) -> (30) GET /?cmd=getbundle HTTP/1.1\r\n
+ sendall(159) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\n (py36 !)
+ sendall(42) -> 96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n; (py36 !)
+ write(159) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\n (py3 no-py36 !)
+ write(42) -> 96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n; (py3 no-py36 !)
+ write(36) -> HTTP/1.1 200 Script output follows\r\n (no-py3 !)
+ write(23) -> Server: badhttpserver\r\n (no-py3 !)
+ write(37) -> Date: $HTTP_DATE$\r\n (no-py3 !)
+ write(41) -> Content-Type: application/mercurial-0.1\r\n (no-py3 !)
+ write(20) -> Content-Length: 42\r\n (no-py3 !)
+ write(2) -> \r\n (no-py3 !)
+ write(42) -> 96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n; (no-py3 !)
+ readline(~) -> (30) GET /?cmd=getbundle HTTP/1.1\r\n
readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
readline(*) -> (440) x-hgarg-1: bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=96ee1d7354c4ad7372047672c36a1f561e3a6a4c&listkeys=phases%2Cbookmarks\r\n (glob)
@@ -634,11 +676,11 @@
readline(*) -> (2) \r\n (glob)
sendall(167 from 167) -> (0) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n (py36 !)
write(167 from 167) -> (0) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n (py3 no-py36 !)
- write(36 from 36) -> (131) HTTP/1.1 200 Script output follows\r\n (no-py3 !)
- write(23 from 23) -> (108) Server: badhttpserver\r\n (no-py3 !)
- write(37 from 37) -> (71) Date: $HTTP_DATE$\r\n (no-py3 !)
- write(41 from 41) -> (30) Content-Type: application/mercurial-0.2\r\n (no-py3 !)
- write(28 from 28) -> (2) Transfer-Encoding: chunked\r\n (no-py3 !)
+ write(36) -> HTTP/1.1 200 Script output follows\r\n (no-py3 !)
+ write(23) -> Server: badhttpserver\r\n (no-py3 !)
+ write(37) -> Date: $HTTP_DATE$\r\n (no-py3 !)
+ write(41) -> Content-Type: application/mercurial-0.2\r\n (no-py3 !)
+ write(28) -> Transfer-Encoding: chunked\r\n (no-py3 !)
write(2 from 2) -> (0) \r\n (no-py3 !)
write limit reached; closing socket
$LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=getbundle': (glob)
@@ -646,13 +688,15 @@
Exception: connection closed after sending N bytes
write(293) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\nHTTP/1.1 500 Internal Server Error\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nTransfer-Encoding: chunked\r\n\r\n (py3 no-py36 !)
- write(36) -> HTTP/1.1 500 Internal Server Error\r\n (no-py3 !)
$ rm -f error.log
Server sends partial compression string
+---------------------------------------
- $ hg serve --config badserver.closeaftersendbytes=983 -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ hg serve \
+ > --config badserver.close-after-send-patterns='4\r\nHG20\r\n' \
+ > -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
$ hg clone http://localhost:$HGPORT/ clone
@@ -664,24 +708,24 @@
$ killdaemons.py $DAEMON_PIDS
$ cat error.log | "$PYTHON" $TESTDIR/filtertraceback.py
- readline(65537) -> (33) GET /?cmd=capabilities HTTP/1.1\r\n
+ readline(~) -> (33) GET /?cmd=capabilities HTTP/1.1\r\n
readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
readline(*) -> (35) accept: application/mercurial-0.1\r\n (glob)
readline(*) -> (2?) host: localhost:$HGPORT\r\n (glob)
readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
readline(*) -> (2) \r\n (glob)
- sendall(160 from 160) -> (823) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 431\r\n\r\n (py36 !)
- sendall(431 from 431) -> (392) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py36 !)
- write(160 from 160) -> (842) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 431\r\n\r\n (py3 no-py36 !)
- write(431 from 431) -> (392) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (py3 no-py36 !)
- write(36 from 36) -> (947) HTTP/1.1 200 Script output follows\r\n (no-py3 !)
- write(23 from 23) -> (924) Server: badhttpserver\r\n (no-py3 !)
- write(37 from 37) -> (887) Date: $HTTP_DATE$\r\n (no-py3 !)
- write(41 from 41) -> (846) Content-Type: application/mercurial-0.1\r\n (no-py3 !)
- write(21 from 21) -> (825) Content-Length: 431\r\n (no-py3 !)
- write(2 from 2) -> (823) \r\n (no-py3 !)
- write(431 from 431) -> (392) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (no-py3 !)
- readline(65537) -> (26) GET /?cmd=batch HTTP/1.1\r\n
+ sendall(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py36 !)
+ sendall(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (py36 !)
+ write(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py3 no-py36 !)
+ write(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (py3 no-py36 !)
+ write(36) -> HTTP/1.1 200 Script output follows\r\n (no-py3 !)
+ write(23) -> Server: badhttpserver\r\n (no-py3 !)
+ write(37) -> Date: $HTTP_DATE$\r\n (no-py3 !)
+ write(41) -> Content-Type: application/mercurial-0.1\r\n (no-py3 !)
+ write(21) -> Content-Length: *\r\n (glob) (no-py3 !)
+ write(2) -> \r\n (no-py3 !)
+ write(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (no-py3 !)
+ readline(~) -> (26) GET /?cmd=batch HTTP/1.1\r\n
readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
readline(*) -> (41) x-hgarg-1: cmds=heads+%3Bknown+nodes%3D\r\n (glob)
@@ -690,17 +734,17 @@
readline(*) -> (2?) host: localhost:$HGPORT\r\n (glob)
readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
readline(*) -> (2) \r\n (glob)
- sendall(159 from 159) -> (233) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\n (py36 !)
- sendall(42 from 42) -> (191) 96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n; (py36 !)
- write(159 from 159) -> (233) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\n (py3 no-py36 !)
- write(36 from 36) -> (356) HTTP/1.1 200 Script output follows\r\n (no-py3 !)
- write(23 from 23) -> (333) Server: badhttpserver\r\n (no-py3 !)
- write(37 from 37) -> (296) Date: $HTTP_DATE$\r\n (no-py3 !)
- write(41 from 41) -> (255) Content-Type: application/mercurial-0.1\r\n (no-py3 !)
- write(20 from 20) -> (235) Content-Length: 42\r\n (no-py3 !)
- write(2 from 2) -> (233) \r\n (no-py3 !)
- write(42 from 42) -> (191) 96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n; (no-py3 !)
- readline(65537) -> (30) GET /?cmd=getbundle HTTP/1.1\r\n
+ sendall(159) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\n (py36 !)
+ sendall(42) -> 96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n; (py36 !)
+ write(159) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\n (py3 no-py36 !)
+ write(36) -> HTTP/1.1 200 Script output follows\r\n (no-py3 !)
+ write(23) -> Server: badhttpserver\r\n (no-py3 !)
+ write(37) -> Date: $HTTP_DATE$\r\n (no-py3 !)
+ write(41) -> Content-Type: application/mercurial-0.1\r\n (no-py3 !)
+ write(20) -> Content-Length: 42\r\n (no-py3 !)
+ write(2) -> \r\n (no-py3 !)
+ write(42) -> 96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n; (no-py3 !)
+ readline(~) -> (30) GET /?cmd=getbundle HTTP/1.1\r\n
readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
readline(*) -> (440) x-hgarg-1: bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=96ee1d7354c4ad7372047672c36a1f561e3a6a4c&listkeys=phases%2Cbookmarks\r\n (glob)
@@ -709,32 +753,34 @@
readline(*) -> (2?) host: localhost:$HGPORT\r\n (glob)
readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
readline(*) -> (2) \r\n (glob)
- sendall(167 from 167) -> (24) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n (py36 !)
- sendall(6 from 6) -> (18) 1\\r\\n\x04\\r\\n (esc) (py36 !)
- sendall(9 from 9) -> (9) 4\r\nnone\r\n (py36 !)
+ sendall(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n (py36 !)
+ sendall(6) -> 1\\r\\n\x04\\r\\n (esc) (py36 !)
+ sendall(9) -> 4\r\nnone\r\n (py36 !)
sendall(9 from 9) -> (0) 4\r\nHG20\r\n (py36 !)
- write(167 from 167) -> (24) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n (py3 no-py36 !)
- write(36 from 36) -> (155) HTTP/1.1 200 Script output follows\r\n (no-py3 !)
- write(23 from 23) -> (132) Server: badhttpserver\r\n (no-py3 !)
- write(37 from 37) -> (95) Date: $HTTP_DATE$\r\n (no-py3 !)
- write(41 from 41) -> (54) Content-Type: application/mercurial-0.2\r\n (no-py3 !)
- write(28 from 28) -> (26) Transfer-Encoding: chunked\r\n (no-py3 !)
- write(2 from 2) -> (24) \r\n (no-py3 !)
- write(6 from 6) -> (18) 1\\r\\n\x04\\r\\n (esc) (no-py3 !)
- write(9 from 9) -> (9) 4\r\nnone\r\n (no-py3 !)
+ write(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n (py3 no-py36 !)
+ write(36) -> HTTP/1.1 200 Script output follows\r\n (no-py3 !)
+ write(23) -> Server: badhttpserver\r\n (no-py3 !)
+ write(37) -> Date: $HTTP_DATE$\r\n (no-py3 !)
+ write(41) -> Content-Type: application/mercurial-0.2\r\n (no-py3 !)
+ write(28) -> Transfer-Encoding: chunked\r\n (no-py3 !)
+ write(2) -> \r\n (no-py3 !)
+ write(6) -> 1\\r\\n\x04\\r\\n (esc) (no-py3 !)
+ write(9) -> 4\r\nnone\r\n (no-py3 !)
write(9 from 9) -> (0) 4\r\nHG20\r\n (no-py3 !)
write limit reached; closing socket
$LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=getbundle': (glob)
Traceback (most recent call last):
Exception: connection closed after sending N bytes
- write(27) -> 15\r\nInternal Server Error\r\n (no-py3 !)
$ rm -f error.log
Server sends partial bundle2 header magic
+-----------------------------------------
- $ hg serve --config badserver.closeaftersendbytes=980 -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ hg serve \
+ > --config badserver.close-after-send-patterns='4\r\nHG2' \
+ > -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
$ hg clone http://localhost:$HGPORT/ clone
@@ -748,9 +794,9 @@
#if py36
$ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -9
- sendall(167 from 167) -> (21) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n
- sendall(6 from 6) -> (15) 1\\r\\n\x04\\r\\n (esc)
- sendall(9 from 9) -> (6) 4\r\nnone\r\n
+ sendall(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n
+ sendall(6) -> 1\\r\\n\x04\\r\\n (esc)
+ sendall(9) -> 4\r\nnone\r\n
sendall(6 from 9) -> (0) 4\r\nHG2
write limit reached; closing socket
$LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=getbundle': (glob)
@@ -760,26 +806,29 @@
#else
$ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -11
- readline(65537) -> (2) \r\n (py3 !)
- write(167 from 167) -> (21) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n (py3 !)
- write(28 from 28) -> (23) Transfer-Encoding: chunked\r\n (no-py3 !)
- write(2 from 2) -> (21) \r\n (no-py3 !)
- write(6 from 6) -> (15) 1\\r\\n\x04\\r\\n (esc)
- write(9 from 9) -> (6) 4\r\nnone\r\n
+ readline(~) -> (2) \r\n (py3 !)
+ write(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n (py3 !)
+ write(41) -> Content-Type: application/mercurial-0.2\r\n (no-py3 !)
+ write(28) -> Transfer-Encoding: chunked\r\n (no-py3 !)
+ write(2) -> \r\n (no-py3 !)
+ write(6) -> 1\\r\\n\x04\\r\\n (esc)
+ write(9) -> 4\r\nnone\r\n
write(6 from 9) -> (0) 4\r\nHG2
write limit reached; closing socket
$LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=getbundle': (glob)
Traceback (most recent call last):
Exception: connection closed after sending N bytes
- write(27) -> 15\r\nInternal Server Error\r\n
#endif
$ rm -f error.log
Server sends incomplete bundle2 stream params length
+----------------------------------------------------
- $ hg serve --config badserver.closeaftersendbytes=989 -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ hg serve \
+ > --config badserver.close-after-send-patterns='4\r\n\0\0\0' \
+ > -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
$ hg clone http://localhost:$HGPORT/ clone
@@ -793,10 +842,10 @@
#if py36
$ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -10
- sendall(167 from 167) -> (30) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n
- sendall(6 from 6) -> (24) 1\\r\\n\x04\\r\\n (esc)
- sendall(9 from 9) -> (15) 4\r\nnone\r\n
- sendall(9 from 9) -> (6) 4\r\nHG20\r\n
+ sendall(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n
+ sendall(6) -> 1\\r\\n\x04\\r\\n (esc)
+ sendall(9) -> 4\r\nnone\r\n
+ sendall(9) -> 4\r\nHG20\r\n
sendall(6 from 9) -> (0) 4\\r\\n\x00\x00\x00 (esc)
write limit reached; closing socket
$LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=getbundle': (glob)
@@ -806,27 +855,30 @@
#else
$ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -12
- readline(65537) -> (2) \r\n (py3 !)
- write(167 from 167) -> (30) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n (py3 !)
- write(28 from 28) -> (32) Transfer-Encoding: chunked\r\n (no-py3 !)
- write(2 from 2) -> (30) \r\n (no-py3 !)
- write(6 from 6) -> (24) 1\\r\\n\x04\\r\\n (esc)
- write(9 from 9) -> (15) 4\r\nnone\r\n
- write(9 from 9) -> (6) 4\r\nHG20\r\n
+ readline(~) -> (2) \r\n (py3 !)
+ write(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n (py3 !)
+ write(41) -> Content-Type: application/mercurial-0.2\r\n
+ write(28) -> Transfer-Encoding: chunked\r\n (no-py3 !)
+ write(2) -> \r\n (no-py3 !)
+ write(6) -> 1\\r\\n\x04\\r\\n (esc)
+ write(9) -> 4\r\nnone\r\n
+ write(9) -> 4\r\nHG20\r\n
write(6 from 9) -> (0) 4\\r\\n\x00\x00\x00 (esc)
write limit reached; closing socket
$LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=getbundle': (glob)
Traceback (most recent call last):
Exception: connection closed after sending N bytes
- write(27) -> 15\r\nInternal Server Error\r\n
#endif
$ rm -f error.log
Servers stops after bundle2 stream params header
+------------------------------------------------
- $ hg serve --config badserver.closeaftersendbytes=992 -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ hg serve \
+ > --config badserver.close-after-send-patterns='4\r\n\0\0\0\0\r\n' \
+ > -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
$ hg clone http://localhost:$HGPORT/ clone
@@ -839,10 +891,10 @@
#if py36
$ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -10
- sendall(167 from 167) -> (33) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n
- sendall(6 from 6) -> (27) 1\\r\\n\x04\\r\\n (esc)
- sendall(9 from 9) -> (18) 4\r\nnone\r\n
- sendall(9 from 9) -> (9) 4\r\nHG20\r\n
+ sendall(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n
+ sendall(6) -> 1\\r\\n\x04\\r\\n (esc)
+ sendall(9) -> 4\r\nnone\r\n
+ sendall(9) -> 4\r\nHG20\r\n
sendall(9 from 9) -> (0) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
write limit reached; closing socket
$LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=getbundle': (glob)
@@ -852,27 +904,30 @@
#else
$ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -12
- readline(65537) -> (2) \r\n (py3 !)
- write(167 from 167) -> (33) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n (py3 !)
- write(28 from 28) -> (35) Transfer-Encoding: chunked\r\n (no-py3 !)
- write(2 from 2) -> (33) \r\n (no-py3 !)
- write(6 from 6) -> (27) 1\\r\\n\x04\\r\\n (esc)
- write(9 from 9) -> (18) 4\r\nnone\r\n
- write(9 from 9) -> (9) 4\r\nHG20\r\n
+ readline(~) -> (2) \r\n (py3 !)
+ write(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n (py3 !)
+ write(41) -> Content-Type: application/mercurial-0.2\r\n
+ write(28) -> Transfer-Encoding: chunked\r\n (no-py3 !)
+ write(2) -> \r\n (no-py3 !)
+ write(6) -> 1\\r\\n\x04\\r\\n (esc)
+ write(9) -> 4\r\nnone\r\n
+ write(9) -> 4\r\nHG20\r\n
write(9 from 9) -> (0) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
write limit reached; closing socket
$LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=getbundle': (glob)
Traceback (most recent call last):
Exception: connection closed after sending N bytes
- write(27) -> 15\r\nInternal Server Error\r\n
#endif
$ rm -f error.log
Server stops sending after bundle2 part header length
+-----------------------------------------------------
- $ hg serve --config badserver.closeaftersendbytes=1001 -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ hg serve \
+ > --config badserver.close-after-send-patterns='4\r\n\0\0\0\)\r\n' \
+ > -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
$ hg clone http://localhost:$HGPORT/ clone
@@ -885,11 +940,11 @@
#if py36
$ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -11
- sendall(167 from 167) -> (42) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n
- sendall(6 from 6) -> (36) 1\\r\\n\x04\\r\\n (esc)
- sendall(9 from 9) -> (27) 4\r\nnone\r\n
- sendall(9 from 9) -> (18) 4\r\nHG20\r\n
- sendall(9 from 9) -> (9) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+ sendall(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n
+ sendall(6) -> 1\\r\\n\x04\\r\\n (esc)
+ sendall(9) -> 4\r\nnone\r\n
+ sendall(9) -> 4\r\nHG20\r\n
+ sendall(9) -> 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
sendall(9 from 9) -> (0) 4\\r\\n\x00\x00\x00)\\r\\n (esc)
write limit reached; closing socket
$LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=getbundle': (glob)
@@ -900,28 +955,31 @@
#else
$ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -13
- readline(65537) -> (2) \r\n (py3 !)
- write(167 from 167) -> (42) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n (py3 !)
- write(28 from 28) -> (44) Transfer-Encoding: chunked\r\n (no-py3 !)
- write(2 from 2) -> (42) \r\n (no-py3 !)
- write(6 from 6) -> (36) 1\\r\\n\x04\\r\\n (esc)
- write(9 from 9) -> (27) 4\r\nnone\r\n
- write(9 from 9) -> (18) 4\r\nHG20\r\n
- write(9 from 9) -> (9) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+ readline(~) -> (2) \r\n (py3 !)
+ write(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n (py3 !)
+ write(41) -> Content-Type: application/mercurial-0.2\r\n
+ write(28) -> Transfer-Encoding: chunked\r\n (no-py3 !)
+ write(2) -> \r\n (no-py3 !)
+ write(6) -> 1\\r\\n\x04\\r\\n (esc)
+ write(9) -> 4\r\nnone\r\n
+ write(9) -> 4\r\nHG20\r\n
+ write(9) -> 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
write(9 from 9) -> (0) 4\\r\\n\x00\x00\x00)\\r\\n (esc)
write limit reached; closing socket
$LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=getbundle': (glob)
Traceback (most recent call last):
Exception: connection closed after sending N bytes
- write(27) -> 15\r\nInternal Server Error\r\n
#endif
$ rm -f error.log
Server stops sending after bundle2 part header
+----------------------------------------------
- $ hg serve --config badserver.closeaftersendbytes=1048 -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ hg serve \
+ > --config badserver.close-after-send-patterns="version02nbchanges1\\r\\n" \
+ > -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
$ hg clone http://localhost:$HGPORT/ clone
@@ -937,12 +995,12 @@
#if py36
$ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -12
- sendall(167 from 167) -> (89) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n
- sendall(6 from 6) -> (83) 1\\r\\n\x04\\r\\n (esc)
- sendall(9 from 9) -> (74) 4\r\nnone\r\n
- sendall(9 from 9) -> (65) 4\r\nHG20\r\n
- sendall(9 from 9) -> (56) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
- sendall(9 from 9) -> (47) 4\\r\\n\x00\x00\x00)\\r\\n (esc)
+ sendall(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n
+ sendall(6) -> 1\\r\\n\x04\\r\\n (esc)
+ sendall(9) -> 4\r\nnone\r\n
+ sendall(9) -> 4\r\nHG20\r\n
+ sendall(9) -> 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+ sendall(9) -> 4\\r\\n\x00\x00\x00)\\r\\n (esc)
sendall(47 from 47) -> (0) 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02 \x01version02nbchanges1\\r\\n (esc)
write limit reached; closing socket
$LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=getbundle': (glob)
@@ -952,29 +1010,32 @@
#else
$ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -14
- readline(65537) -> (2) \r\n (py3 !)
- write(167 from 167) -> (89) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n (py3 !)
- write(28 from 28) -> (91) Transfer-Encoding: chunked\r\n (no-py3 !)
- write(2 from 2) -> (89) \r\n (no-py3 !)
- write(6 from 6) -> (83) 1\\r\\n\x04\\r\\n (esc)
- write(9 from 9) -> (74) 4\r\nnone\r\n
- write(9 from 9) -> (65) 4\r\nHG20\r\n
- write(9 from 9) -> (56) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
- write(9 from 9) -> (47) 4\\r\\n\x00\x00\x00)\\r\\n (esc)
+ readline(~) -> (2) \r\n (py3 !)
+ write(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n (py3 !)
+ write(41) -> Content-Type: application/mercurial-0.2\r\n
+ write(28) -> Transfer-Encoding: chunked\r\n (no-py3 !)
+ write(2) -> \r\n (no-py3 !)
+ write(6) -> 1\\r\\n\x04\\r\\n (esc)
+ write(9) -> 4\r\nnone\r\n
+ write(9) -> 4\r\nHG20\r\n
+ write(9) -> 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+ write(9) -> 4\\r\\n\x00\x00\x00)\\r\\n (esc)
write(47 from 47) -> (0) 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02 \x01version02nbchanges1\\r\\n (esc)
write limit reached; closing socket
$LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=getbundle': (glob)
Traceback (most recent call last):
Exception: connection closed after sending N bytes
- write(27) -> 15\r\nInternal Server Error\r\n
#endif
$ rm -f error.log
Server stops after bundle2 part payload chunk size
+--------------------------------------------------
- $ hg serve --config badserver.closeaftersendbytes=1069 -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ hg serve \
+ > --config badserver.close-after-send-patterns='1d2\r\n.......' \
+ > -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
$ hg clone http://localhost:$HGPORT/ clone
@@ -991,14 +1052,14 @@
#if py36
$ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -14
- sendall(167 from 167) -> (110) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n
- sendall(6 from 6) -> (104) 1\\r\\n\x04\\r\\n (esc)
- sendall(9 from 9) -> (95) 4\r\nnone\r\n
- sendall(9 from 9) -> (86) 4\r\nHG20\r\n
- sendall(9 from 9) -> (77) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
- sendall(9 from 9) -> (68) 4\\r\\n\x00\x00\x00)\\r\\n (esc)
- sendall(47 from 47) -> (21) 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02 \x01version02nbchanges1\\r\\n (esc)
- sendall(9 from 9) -> (12) 4\\r\\n\x00\x00\x01\xd2\\r\\n (esc)
+ sendall(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n
+ sendall(6) -> 1\\r\\n\x04\\r\\n (esc)
+ sendall(9) -> 4\r\nnone\r\n
+ sendall(9) -> 4\r\nHG20\r\n
+ sendall(9) -> 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+ sendall(9) -> 4\\r\\n\x00\x00\x00)\\r\\n (esc)
+ sendall(47) -> 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02 \x01version02nbchanges1\\r\\n (esc)
+ sendall(9) -> 4\\r\\n\x00\x00\x01\xd2\\r\\n (esc)
sendall(12 from 473) -> (0) 1d2\\r\\n\x00\x00\x00\xb2\x96\xee\x1d (esc)
write limit reached; closing socket
$LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=getbundle': (glob)
@@ -1008,29 +1069,32 @@
#else
$ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -15
- write(167 from 167) -> (110) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n (py3 !)
- write(2 from 2) -> (110) \r\n (no-py3 !)
- write(6 from 6) -> (104) 1\\r\\n\x04\\r\\n (esc)
- write(9 from 9) -> (95) 4\r\nnone\r\n
- write(9 from 9) -> (86) 4\r\nHG20\r\n
- write(9 from 9) -> (77) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
- write(9 from 9) -> (68) 4\\r\\n\x00\x00\x00)\\r\\n (esc)
- write(47 from 47) -> (21) 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02 \x01version02nbchanges1\\r\\n (esc)
- write(9 from 9) -> (12) 4\\r\\n\x00\x00\x01\xd2\\r\\n (esc)
+ write(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n (py3 !)
+ write(28) -> Transfer-Encoding: chunked\r\n
+ write(2) -> \r\n (no-py3 !)
+ write(6) -> 1\\r\\n\x04\\r\\n (esc)
+ write(9) -> 4\r\nnone\r\n
+ write(9) -> 4\r\nHG20\r\n
+ write(9) -> 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+ write(9) -> 4\\r\\n\x00\x00\x00)\\r\\n (esc)
+ write(47) -> 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02 \x01version02nbchanges1\\r\\n (esc)
+ write(9) -> 4\\r\\n\x00\x00\x01\xd2\\r\\n (esc)
write(12 from 473) -> (0) 1d2\\r\\n\x00\x00\x00\xb2\x96\xee\x1d (esc)
write limit reached; closing socket
$LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=getbundle': (glob)
Traceback (most recent call last):
Exception: connection closed after sending N bytes
- write(27) -> 15\r\nInternal Server Error\r\n
#endif
$ rm -f error.log
Server stops sending in middle of bundle2 payload chunk
+-------------------------------------------------------
- $ hg serve --config badserver.closeaftersendbytes=1530 -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ hg serve \
+ > --config badserver.close-after-send-patterns=':jL\0\0\x00\0\0\0\0\0\r\n' \
+ > -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
$ hg clone http://localhost:$HGPORT/ clone
@@ -1046,14 +1110,14 @@
#if py36
$ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -14
- sendall(167 from 167) -> (571) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n
- sendall(6 from 6) -> (565) 1\\r\\n\x04\\r\\n (esc)
- sendall(9 from 9) -> (556) 4\r\nnone\r\n
- sendall(9 from 9) -> (547) 4\r\nHG20\r\n
- sendall(9 from 9) -> (538) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
- sendall(9 from 9) -> (529) 4\\r\\n\x00\x00\x00)\\r\\n (esc)
- sendall(47 from 47) -> (482) 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02 \x01version02nbchanges1\\r\\n (esc)
- sendall(9 from 9) -> (473) 4\\r\\n\x00\x00\x01\xd2\\r\\n (esc)
+ sendall(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n
+ sendall(6) -> 1\\r\\n\x04\\r\\n (esc)
+ sendall(9) -> 4\r\nnone\r\n
+ sendall(9) -> 4\r\nHG20\r\n
+ sendall(9) -> 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+ sendall(9) -> 4\\r\\n\x00\x00\x00)\\r\\n (esc)
+ sendall(47) -> 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02 \x01version02nbchanges1\\r\\n (esc)
+ sendall(9) -> 4\\r\\n\x00\x00\x01\xd2\\r\\n (esc)
sendall(473 from 473) -> (0) 1d2\\r\\n\x00\x00\x00\xb2\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>6a3df4de388f3c4f8e28f4f9a814299a3cbb5f50\\ntest\\n0 0\\nfoo\\n\\ninitial\x00\x00\x00\x00\x00\x00\x00\xa1j=\xf4\xde8\x8f<O\x8e(\xf4\xf9\xa8\x14)\x9a<\xbb_P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00b80de5d138758541c5f05265ad144ab9fa86d1db\\n\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00h\xb8\\r\xe5\xd18u\x85A\xc5\xf0Re\xad\x14J\xb9\xfa\x86\xd1\xdb\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\\r\\n (esc)
write limit reached; closing socket
$LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=getbundle': (glob)
@@ -1063,31 +1127,34 @@
#else
$ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -16
- readline(65537) -> (2) \r\n (py3 !)
- write(167 from 167) -> (571) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n (py3 !)
- write(28 from 28) -> (573) Transfer-Encoding: chunked\r\n (no-py3 !)
- write(2 from 2) -> (571) \r\n (no-py3 !)
- write(6 from 6) -> (565) 1\\r\\n\x04\\r\\n (esc)
- write(9 from 9) -> (556) 4\r\nnone\r\n
- write(9 from 9) -> (547) 4\r\nHG20\r\n
- write(9 from 9) -> (538) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
- write(9 from 9) -> (529) 4\\r\\n\x00\x00\x00)\\r\\n (esc)
- write(47 from 47) -> (482) 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02 \x01version02nbchanges1\\r\\n (esc)
- write(9 from 9) -> (473) 4\\r\\n\x00\x00\x01\xd2\\r\\n (esc)
+ readline(~) -> (2) \r\n (py3 !)
+ write(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n (py3 !)
+ write(41) -> Content-Type: application/mercurial-0.2\r\n
+ write(28) -> Transfer-Encoding: chunked\r\n (no-py3 !)
+ write(2) -> \r\n (no-py3 !)
+ write(6) -> 1\\r\\n\x04\\r\\n (esc)
+ write(9) -> 4\r\nnone\r\n
+ write(9) -> 4\r\nHG20\r\n
+ write(9) -> 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+ write(9) -> 4\\r\\n\x00\x00\x00)\\r\\n (esc)
+ write(47) -> 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02 \x01version02nbchanges1\\r\\n (esc)
+ write(9) -> 4\\r\\n\x00\x00\x01\xd2\\r\\n (esc)
write(473 from 473) -> (0) 1d2\\r\\n\x00\x00\x00\xb2\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>6a3df4de388f3c4f8e28f4f9a814299a3cbb5f50\\ntest\\n0 0\\nfoo\\n\\ninitial\x00\x00\x00\x00\x00\x00\x00\xa1j=\xf4\xde8\x8f<O\x8e(\xf4\xf9\xa8\x14)\x9a<\xbb_P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00b80de5d138758541c5f05265ad144ab9fa86d1db\\n\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00h\xb8\\r\xe5\xd18u\x85A\xc5\xf0Re\xad\x14J\xb9\xfa\x86\xd1\xdb\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\\r\\n (esc)
write limit reached; closing socket
$LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=getbundle': (glob)
Traceback (most recent call last):
Exception: connection closed after sending N bytes
- write(27) -> 15\r\nInternal Server Error\r\n
#endif
$ rm -f error.log
Server stops sending after 0 length payload chunk size
+------------------------------------------------------
- $ hg serve --config badserver.closeaftersendbytes=1561 -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ hg serve \
+ > --config badserver.close-after-send-patterns=LISTKEYS \
+ > -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
$ hg clone http://localhost:$HGPORT/ clone
@@ -1106,16 +1173,16 @@
#if py36
$ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -16
- sendall(6 from 6) -> (596) 1\\r\\n\x04\\r\\n (esc)
- sendall(9 from 9) -> (587) 4\r\nnone\r\n
- sendall(9 from 9) -> (578) 4\r\nHG20\r\n
- sendall(9 from 9) -> (569) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
- sendall(9 from 9) -> (560) 4\\r\\n\x00\x00\x00)\\r\\n (esc)
- sendall(47 from 47) -> (513) 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02 \x01version02nbchanges1\\r\\n (esc)
- sendall(9 from 9) -> (504) 4\\r\\n\x00\x00\x01\xd2\\r\\n (esc)
- sendall(473 from 473) -> (31) 1d2\\r\\n\x00\x00\x00\xb2\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>6a3df4de388f3c4f8e28f4f9a814299a3cbb5f50\\ntest\\n0 0\\nfoo\\n\\ninitial\x00\x00\x00\x00\x00\x00\x00\xa1j=\xf4\xde8\x8f<O\x8e(\xf4\xf9\xa8\x14)\x9a<\xbb_P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00b80de5d138758541c5f05265ad144ab9fa86d1db\\n\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00h\xb8\\r\xe5\xd18u\x85A\xc5\xf0Re\xad\x14J\xb9\xfa\x86\xd1\xdb\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\\r\\n (esc)
- sendall(9 from 9) -> (22) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
- sendall(9 from 9) -> (13) 4\\r\\n\x00\x00\x00 \\r\\n (esc)
+ sendall(6) -> 1\\r\\n\x04\\r\\n (esc)
+ sendall(9) -> 4\r\nnone\r\n
+ sendall(9) -> 4\r\nHG20\r\n
+ sendall(9) -> 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+ sendall(9) -> 4\\r\\n\x00\x00\x00)\\r\\n (esc)
+ sendall(47) -> 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02 \x01version02nbchanges1\\r\\n (esc)
+ sendall(9) -> 4\\r\\n\x00\x00\x01\xd2\\r\\n (esc)
+ sendall(473) -> 1d2\\r\\n\x00\x00\x00\xb2\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>6a3df4de388f3c4f8e28f4f9a814299a3cbb5f50\\ntest\\n0 0\\nfoo\\n\\ninitial\x00\x00\x00\x00\x00\x00\x00\xa1j=\xf4\xde8\x8f<O\x8e(\xf4\xf9\xa8\x14)\x9a<\xbb_P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00b80de5d138758541c5f05265ad144ab9fa86d1db\\n\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00h\xb8\\r\xe5\xd18u\x85A\xc5\xf0Re\xad\x14J\xb9\xfa\x86\xd1\xdb\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\\r\\n (esc)
+ sendall(9) -> 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+ sendall(9) -> 4\\r\\n\x00\x00\x00 \\r\\n (esc)
sendall(13 from 38) -> (0) 20\\r\\n\x08LISTKEYS (esc)
write limit reached; closing socket
$LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=getbundle': (glob)
@@ -1125,31 +1192,35 @@
#else
$ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -17
- write(6 from 6) -> (596) 1\\r\\n\x04\\r\\n (esc)
- write(9 from 9) -> (587) 4\r\nnone\r\n
- write(9 from 9) -> (578) 4\r\nHG20\r\n
- write(9 from 9) -> (569) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
- write(9 from 9) -> (560) 4\\r\\n\x00\x00\x00)\\r\\n (esc)
- write(47 from 47) -> (513) 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02 \x01version02nbchanges1\\r\\n (esc)
- write(9 from 9) -> (504) 4\\r\\n\x00\x00\x01\xd2\\r\\n (esc)
- write(473 from 473) -> (31) 1d2\\r\\n\x00\x00\x00\xb2\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>6a3df4de388f3c4f8e28f4f9a814299a3cbb5f50\\ntest\\n0 0\\nfoo\\n\\ninitial\x00\x00\x00\x00\x00\x00\x00\xa1j=\xf4\xde8\x8f<O\x8e(\xf4\xf9\xa8\x14)\x9a<\xbb_P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00b80de5d138758541c5f05265ad144ab9fa86d1db\\n\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00h\xb8\\r\xe5\xd18u\x85A\xc5\xf0Re\xad\x14J\xb9\xfa\x86\xd1\xdb\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\\r\\n (esc)
- write(9 from 9) -> (22) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
- write(9 from 9) -> (13) 4\\r\\n\x00\x00\x00 \\r\\n (esc)
+ write(2) -> \r\n
+ write(6) -> 1\\r\\n\x04\\r\\n (esc)
+ write(9) -> 4\r\nnone\r\n
+ write(9) -> 4\r\nHG20\r\n
+ write(9) -> 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+ write(9) -> 4\\r\\n\x00\x00\x00)\\r\\n (esc)
+ write(47) -> 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02 \x01version02nbchanges1\\r\\n (esc)
+ write(9) -> 4\\r\\n\x00\x00\x01\xd2\\r\\n (esc)
+ write(473) -> 1d2\\r\\n\x00\x00\x00\xb2\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>6a3df4de388f3c4f8e28f4f9a814299a3cbb5f50\\ntest\\n0 0\\nfoo\\n\\ninitial\x00\x00\x00\x00\x00\x00\x00\xa1j=\xf4\xde8\x8f<O\x8e(\xf4\xf9\xa8\x14)\x9a<\xbb_P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00b80de5d138758541c5f05265ad144ab9fa86d1db\\n\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00h\xb8\\r\xe5\xd18u\x85A\xc5\xf0Re\xad\x14J\xb9\xfa\x86\xd1\xdb\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\\r\\n (esc)
+ write(9) -> 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+ write(9) -> 4\\r\\n\x00\x00\x00 \\r\\n (esc)
write(13 from 38) -> (0) 20\\r\\n\x08LISTKEYS (esc)
write limit reached; closing socket
$LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=getbundle': (glob)
Traceback (most recent call last):
Exception: connection closed after sending N bytes
- write(27) -> 15\r\nInternal Server Error\r\n
#endif
$ rm -f error.log
Server stops sending after 0 part bundle part header (indicating end of bundle2 payload)
+----------------------------------------------------------------------------------------
+
This is before the 0 size chunked transfer part that signals end of HTTP response.
- $ hg serve --config badserver.closeaftersendbytes=1736 -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ hg serve \
+ > --config badserver.close-after-send-patterns='(.*4\r\n\0\0\0\0\r\n){5}' \
+ > -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
$ hg clone http://localhost:$HGPORT/ clone
@@ -1166,20 +1237,20 @@
#if py36
$ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -20
- sendall(9 from 9) -> (744) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
- sendall(9 from 9) -> (735) 4\\r\\n\x00\x00\x00)\\r\\n (esc)
- sendall(47 from 47) -> (688) 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02 \x01version02nbchanges1\\r\\n (esc)
- sendall(9 from 9) -> (679) 4\\r\\n\x00\x00\x01\xd2\\r\\n (esc)
- sendall(473 from 473) -> (206) 1d2\\r\\n\x00\x00\x00\xb2\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>6a3df4de388f3c4f8e28f4f9a814299a3cbb5f50\\ntest\\n0 0\\nfoo\\n\\ninitial\x00\x00\x00\x00\x00\x00\x00\xa1j=\xf4\xde8\x8f<O\x8e(\xf4\xf9\xa8\x14)\x9a<\xbb_P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00b80de5d138758541c5f05265ad144ab9fa86d1db\\n\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00h\xb8\\r\xe5\xd18u\x85A\xc5\xf0Re\xad\x14J\xb9\xfa\x86\xd1\xdb\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\\r\\n (esc)
- sendall(9 from 9) -> (197) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
- sendall(9 from 9) -> (188) 4\\r\\n\x00\x00\x00 \\r\\n (esc)
- sendall(38 from 38) -> (150) 20\\r\\n\x08LISTKEYS\x00\x00\x00\x01\x01\x00 \x06namespacephases\\r\\n (esc)
- sendall(9 from 9) -> (141) 4\\r\\n\x00\x00\x00:\\r\\n (esc)
- sendall(64 from 64) -> (77) 3a\r\n96ee1d7354c4ad7372047672c36a1f561e3a6a4c 1\npublishing True\r\n
- sendall(9 from 9) -> (68) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
- sendall(9 from 9) -> (59) 4\\r\\n\x00\x00\x00#\\r\\n (esc)
- sendall(41 from 41) -> (18) 23\\r\\n\x08LISTKEYS\x00\x00\x00\x02\x01\x00 namespacebookmarks\\r\\n (esc)
- sendall(9 from 9) -> (9) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+ sendall(9) -> 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+ sendall(9) -> 4\\r\\n\x00\x00\x00)\\r\\n (esc)
+ sendall(47) -> 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02 \x01version02nbchanges1\\r\\n (esc)
+ sendall(9) -> 4\\r\\n\x00\x00\x01\xd2\\r\\n (esc)
+ sendall(473) -> 1d2\\r\\n\x00\x00\x00\xb2\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>6a3df4de388f3c4f8e28f4f9a814299a3cbb5f50\\ntest\\n0 0\\nfoo\\n\\ninitial\x00\x00\x00\x00\x00\x00\x00\xa1j=\xf4\xde8\x8f<O\x8e(\xf4\xf9\xa8\x14)\x9a<\xbb_P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00b80de5d138758541c5f05265ad144ab9fa86d1db\\n\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00h\xb8\\r\xe5\xd18u\x85A\xc5\xf0Re\xad\x14J\xb9\xfa\x86\xd1\xdb\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\\r\\n (esc)
+ sendall(9) -> 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+ sendall(9) -> 4\\r\\n\x00\x00\x00 \\r\\n (esc)
+ sendall(38) -> 20\\r\\n\x08LISTKEYS\x00\x00\x00\x01\x01\x00 \x06namespacephases\\r\\n (esc)
+ sendall(9) -> 4\\r\\n\x00\x00\x00:\\r\\n (esc)
+ sendall(64) -> 3a\r\n96ee1d7354c4ad7372047672c36a1f561e3a6a4c 1\npublishing True\r\n
+ sendall(9) -> 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+ sendall(9) -> 4\\r\\n\x00\x00\x00#\\r\\n (esc)
+ sendall(41) -> 23\\r\\n\x08LISTKEYS\x00\x00\x00\x02\x01\x00 namespacebookmarks\\r\\n (esc)
+ sendall(9) -> 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
sendall(9 from 9) -> (0) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
write limit reached; closing socket
$LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=getbundle': (glob)
@@ -1189,35 +1260,38 @@
#else
$ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -21
- write(9 from 9) -> (744) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
- write(9 from 9) -> (735) 4\\r\\n\x00\x00\x00)\\r\\n (esc)
- write(47 from 47) -> (688) 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02 \x01version02nbchanges1\\r\\n (esc)
- write(9 from 9) -> (679) 4\\r\\n\x00\x00\x01\xd2\\r\\n (esc)
- write(473 from 473) -> (206) 1d2\\r\\n\x00\x00\x00\xb2\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>6a3df4de388f3c4f8e28f4f9a814299a3cbb5f50\\ntest\\n0 0\\nfoo\\n\\ninitial\x00\x00\x00\x00\x00\x00\x00\xa1j=\xf4\xde8\x8f<O\x8e(\xf4\xf9\xa8\x14)\x9a<\xbb_P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00b80de5d138758541c5f05265ad144ab9fa86d1db\\n\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00h\xb8\\r\xe5\xd18u\x85A\xc5\xf0Re\xad\x14J\xb9\xfa\x86\xd1\xdb\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\\r\\n (esc)
- write(9 from 9) -> (197) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
- write(9 from 9) -> (188) 4\\r\\n\x00\x00\x00 \\r\\n (esc)
- write(38 from 38) -> (150) 20\\r\\n\x08LISTKEYS\x00\x00\x00\x01\x01\x00 \x06namespacephases\\r\\n (esc)
- write(9 from 9) -> (141) 4\\r\\n\x00\x00\x00:\\r\\n (esc)
- write(64 from 64) -> (77) 3a\r\n96ee1d7354c4ad7372047672c36a1f561e3a6a4c 1\npublishing True\r\n
- write(9 from 9) -> (68) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
- write(9 from 9) -> (59) 4\\r\\n\x00\x00\x00#\\r\\n (esc)
- write(41 from 41) -> (18) 23\\r\\n\x08LISTKEYS\x00\x00\x00\x02\x01\x00 namespacebookmarks\\r\\n (esc)
- write(9 from 9) -> (9) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+ write(9) -> 4\r\nHG20\r\n
+ write(9) -> 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+ write(9) -> 4\\r\\n\x00\x00\x00)\\r\\n (esc)
+ write(47) -> 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02 \x01version02nbchanges1\\r\\n (esc)
+ write(9) -> 4\\r\\n\x00\x00\x01\xd2\\r\\n (esc)
+ write(473) -> 1d2\\r\\n\x00\x00\x00\xb2\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>6a3df4de388f3c4f8e28f4f9a814299a3cbb5f50\\ntest\\n0 0\\nfoo\\n\\ninitial\x00\x00\x00\x00\x00\x00\x00\xa1j=\xf4\xde8\x8f<O\x8e(\xf4\xf9\xa8\x14)\x9a<\xbb_P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00b80de5d138758541c5f05265ad144ab9fa86d1db\\n\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00h\xb8\\r\xe5\xd18u\x85A\xc5\xf0Re\xad\x14J\xb9\xfa\x86\xd1\xdb\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\\r\\n (esc)
+ write(9) -> 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+ write(9) -> 4\\r\\n\x00\x00\x00 \\r\\n (esc)
+ write(38) -> 20\\r\\n\x08LISTKEYS\x00\x00\x00\x01\x01\x00 \x06namespacephases\\r\\n (esc)
+ write(9) -> 4\\r\\n\x00\x00\x00:\\r\\n (esc)
+ write(64) -> 3a\r\n96ee1d7354c4ad7372047672c36a1f561e3a6a4c 1\npublishing True\r\n
+ write(9) -> 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+ write(9) -> 4\\r\\n\x00\x00\x00#\\r\\n (esc)
+ write(41) -> 23\\r\\n\x08LISTKEYS\x00\x00\x00\x02\x01\x00 namespacebookmarks\\r\\n (esc)
+ write(9) -> 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
write(9 from 9) -> (0) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
write limit reached; closing socket
$LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=getbundle': (glob)
Traceback (most recent call last):
Exception: connection closed after sending N bytes
- write(27) -> 15\r\nInternal Server Error\r\n
#endif
$ rm -f error.log
$ rm -rf clone
Server sends a size 0 chunked-transfer size without terminating \r\n
+--------------------------------------------------------------------
- $ hg serve --config badserver.closeaftersendbytes=1739 -p $HGPORT -d --pid-file=hg.pid -E error.log
+ $ hg serve \
+ > --config badserver.close-after-send-patterns="(.*4\\r\\n\0\0\0\0\\r\\n0\r\n)" \
+ > -p $HGPORT -d --pid-file=hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
$ hg clone http://localhost:$HGPORT/ clone
@@ -1234,21 +1308,21 @@
#if py36
$ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -21
- sendall(9 from 9) -> (747) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
- sendall(9 from 9) -> (738) 4\\r\\n\x00\x00\x00)\\r\\n (esc)
- sendall(47 from 47) -> (691) 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02 \x01version02nbchanges1\\r\\n (esc)
- sendall(9 from 9) -> (682) 4\\r\\n\x00\x00\x01\xd2\\r\\n (esc)
- sendall(473 from 473) -> (209) 1d2\\r\\n\x00\x00\x00\xb2\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>6a3df4de388f3c4f8e28f4f9a814299a3cbb5f50\\ntest\\n0 0\\nfoo\\n\\ninitial\x00\x00\x00\x00\x00\x00\x00\xa1j=\xf4\xde8\x8f<O\x8e(\xf4\xf9\xa8\x14)\x9a<\xbb_P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00b80de5d138758541c5f05265ad144ab9fa86d1db\\n\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00h\xb8\\r\xe5\xd18u\x85A\xc5\xf0Re\xad\x14J\xb9\xfa\x86\xd1\xdb\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\\r\\n (esc)
- sendall(9 from 9) -> (200) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
- sendall(9 from 9) -> (191) 4\\r\\n\x00\x00\x00 \\r\\n (esc)
- sendall(38 from 38) -> (153) 20\\r\\n\x08LISTKEYS\x00\x00\x00\x01\x01\x00 \x06namespacephases\\r\\n (esc)
- sendall(9 from 9) -> (144) 4\\r\\n\x00\x00\x00:\\r\\n (esc)
- sendall(64 from 64) -> (80) 3a\r\n96ee1d7354c4ad7372047672c36a1f561e3a6a4c 1\npublishing True\r\n
- sendall(9 from 9) -> (71) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
- sendall(9 from 9) -> (62) 4\\r\\n\x00\x00\x00#\\r\\n (esc)
- sendall(41 from 41) -> (21) 23\\r\\n\x08LISTKEYS\x00\x00\x00\x02\x01\x00 namespacebookmarks\\r\\n (esc)
- sendall(9 from 9) -> (12) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
- sendall(9 from 9) -> (3) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+ sendall(9) -> 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+ sendall(9) -> 4\\r\\n\x00\x00\x00)\\r\\n (esc)
+ sendall(47) -> 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02 \x01version02nbchanges1\\r\\n (esc)
+ sendall(9) -> 4\\r\\n\x00\x00\x01\xd2\\r\\n (esc)
+ sendall(473) -> 1d2\\r\\n\x00\x00\x00\xb2\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>6a3df4de388f3c4f8e28f4f9a814299a3cbb5f50\\ntest\\n0 0\\nfoo\\n\\ninitial\x00\x00\x00\x00\x00\x00\x00\xa1j=\xf4\xde8\x8f<O\x8e(\xf4\xf9\xa8\x14)\x9a<\xbb_P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00b80de5d138758541c5f05265ad144ab9fa86d1db\\n\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00h\xb8\\r\xe5\xd18u\x85A\xc5\xf0Re\xad\x14J\xb9\xfa\x86\xd1\xdb\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\\r\\n (esc)
+ sendall(9) -> 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+ sendall(9) -> 4\\r\\n\x00\x00\x00 \\r\\n (esc)
+ sendall(38) -> 20\\r\\n\x08LISTKEYS\x00\x00\x00\x01\x01\x00 \x06namespacephases\\r\\n (esc)
+ sendall(9) -> 4\\r\\n\x00\x00\x00:\\r\\n (esc)
+ sendall(64) -> 3a\r\n96ee1d7354c4ad7372047672c36a1f561e3a6a4c 1\npublishing True\r\n
+ sendall(9) -> 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+ sendall(9) -> 4\\r\\n\x00\x00\x00#\\r\\n (esc)
+ sendall(41) -> 23\\r\\n\x08LISTKEYS\x00\x00\x00\x02\x01\x00 namespacebookmarks\\r\\n (esc)
+ sendall(9) -> 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+ sendall(9) -> 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
sendall(3 from 5) -> (0) 0\r\n
write limit reached; closing socket
$LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=getbundle': (glob)
@@ -1258,28 +1332,28 @@
#else
$ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -22
- write(9 from 9) -> (747) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
- write(9 from 9) -> (738) 4\\r\\n\x00\x00\x00)\\r\\n (esc)
- write(47 from 47) -> (691) 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02 \x01version02nbchanges1\\r\\n (esc)
- write(9 from 9) -> (682) 4\\r\\n\x00\x00\x01\xd2\\r\\n (esc)
- write(473 from 473) -> (209) 1d2\\r\\n\x00\x00\x00\xb2\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>6a3df4de388f3c4f8e28f4f9a814299a3cbb5f50\\ntest\\n0 0\\nfoo\\n\\ninitial\x00\x00\x00\x00\x00\x00\x00\xa1j=\xf4\xde8\x8f<O\x8e(\xf4\xf9\xa8\x14)\x9a<\xbb_P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00b80de5d138758541c5f05265ad144ab9fa86d1db\\n\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00h\xb8\\r\xe5\xd18u\x85A\xc5\xf0Re\xad\x14J\xb9\xfa\x86\xd1\xdb\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\\r\\n (esc)
- write(9 from 9) -> (200) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
- write(9 from 9) -> (191) 4\\r\\n\x00\x00\x00 \\r\\n (esc)
- write(38 from 38) -> (153) 20\\r\\n\x08LISTKEYS\x00\x00\x00\x01\x01\x00 \x06namespacephases\\r\\n (esc)
- write(9 from 9) -> (144) 4\\r\\n\x00\x00\x00:\\r\\n (esc)
- write(64 from 64) -> (80) 3a\r\n96ee1d7354c4ad7372047672c36a1f561e3a6a4c 1\npublishing True\r\n
- write(9 from 9) -> (71) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
- write(9 from 9) -> (62) 4\\r\\n\x00\x00\x00#\\r\\n (esc)
- write(41 from 41) -> (21) 23\\r\\n\x08LISTKEYS\x00\x00\x00\x02\x01\x00 namespacebookmarks\\r\\n (esc)
- write(9 from 9) -> (12) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
- write(9 from 9) -> (3) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+ write(9) -> 4\r\nHG20\r\n
+ write(9) -> 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+ write(9) -> 4\\r\\n\x00\x00\x00)\\r\\n (esc)
+ write(47) -> 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02 \x01version02nbchanges1\\r\\n (esc)
+ write(9) -> 4\\r\\n\x00\x00\x01\xd2\\r\\n (esc)
+ write(473) -> 1d2\\r\\n\x00\x00\x00\xb2\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>6a3df4de388f3c4f8e28f4f9a814299a3cbb5f50\\ntest\\n0 0\\nfoo\\n\\ninitial\x00\x00\x00\x00\x00\x00\x00\xa1j=\xf4\xde8\x8f<O\x8e(\xf4\xf9\xa8\x14)\x9a<\xbb_P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00b80de5d138758541c5f05265ad144ab9fa86d1db\\n\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00h\xb8\\r\xe5\xd18u\x85A\xc5\xf0Re\xad\x14J\xb9\xfa\x86\xd1\xdb\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\\r\\n (esc)
+ write(9) -> 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+ write(9) -> 4\\r\\n\x00\x00\x00 \\r\\n (esc)
+ write(38) -> 20\\r\\n\x08LISTKEYS\x00\x00\x00\x01\x01\x00 \x06namespacephases\\r\\n (esc)
+ write(9) -> 4\\r\\n\x00\x00\x00:\\r\\n (esc)
+ write(64) -> 3a\r\n96ee1d7354c4ad7372047672c36a1f561e3a6a4c 1\npublishing True\r\n
+ write(9) -> 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+ write(9) -> 4\\r\\n\x00\x00\x00#\\r\\n (esc)
+ write(41) -> 23\\r\\n\x08LISTKEYS\x00\x00\x00\x02\x01\x00 namespacebookmarks\\r\\n (esc)
+ write(9) -> 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+ write(9) -> 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
write(3 from 5) -> (0) 0\r\n
write limit reached; closing socket
$LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=getbundle': (glob)
Traceback (most recent call last):
Exception: connection closed after sending N bytes
- write(27) -> 15\r\nInternal Server Error\r\n
#endif
$ rm -f error.log
--- a/tests/test-http-protocol.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-http-protocol.t Fri Feb 18 14:27:43 2022 +0100
@@ -198,7 +198,7 @@
s> Content-Type: application/mercurial-0.1\r\n
s> Content-Length: *\r\n (glob)
s> \r\n
- s> batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ s> batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset compression=\$BUNDLE2_COMPRESSIONS\$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (re)
sending listkeys command
s> setsockopt(6, 1, 1) -> None (?)
s> GET /?cmd=listkeys HTTP/1.1\r\n
@@ -252,121 +252,6 @@
s> bookmarks\t\n
s> namespaces\t\n
s> phases\t
-
-Client with HTTPv2 enabled advertises that and gets old capabilities response from old server
-
- $ hg --config experimental.httppeer.advertise-v2=true --verbose debugwireproto http://$LOCALIP:$HGPORT << EOF
- > command heads
- > EOF
- s> setsockopt(6, 1, 1) -> None (?)
- s> GET /?cmd=capabilities HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> vary: X-HgProto-1,X-HgUpgrade-1\r\n
- s> x-hgproto-1: cbor\r\n
- s> x-hgupgrade-1: exp-http-v2-0003\r\n
- s> accept: application/mercurial-0.1\r\n
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> user-agent: Mercurial debugwireproto\r\n
- s> \r\n
- s> makefile('rb', None)
- s> HTTP/1.1 200 Script output follows\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Content-Type: application/mercurial-0.1\r\n
- s> Content-Length: *\r\n (glob)
- s> \r\n
- s> batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
- sending heads command
- s> setsockopt(6, 1, 1) -> None (?)
- s> GET /?cmd=heads HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> vary: X-HgProto-1\r\n
- s> x-hgproto-1: 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull\r\n
- s> accept: application/mercurial-0.1\r\n
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> user-agent: Mercurial debugwireproto\r\n
- s> \r\n
- s> makefile('rb', None)
- s> HTTP/1.1 200 Script output follows\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Content-Type: application/mercurial-0.1\r\n
- s> Content-Length: 41\r\n
- s> \r\n
- s> 0000000000000000000000000000000000000000\n
- response: [
- b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
- ]
- (sent 2 HTTP requests and * bytes; received * bytes in responses) (glob)
-
- $ killdaemons.py
- $ enablehttpv2 empty
- $ hg --config server.compressionengines=zlib -R empty serve -p $HGPORT -d --pid-file hg.pid
- $ cat hg.pid > $DAEMON_PIDS
-
-Client with HTTPv2 enabled automatically upgrades if the server supports it
-
- $ hg --config experimental.httppeer.advertise-v2=true --config experimental.httppeer.v2-encoder-order=identity --verbose debugwireproto http://$LOCALIP:$HGPORT << EOF
- > command heads
- > EOF
- s> setsockopt(6, 1, 1) -> None (?)
- s> GET /?cmd=capabilities HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> vary: X-HgProto-1,X-HgUpgrade-1\r\n
- s> x-hgproto-1: cbor\r\n
- s> x-hgupgrade-1: exp-http-v2-0003\r\n
- s> accept: application/mercurial-0.1\r\n
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> user-agent: Mercurial debugwireproto\r\n
- s> \r\n
- s> makefile('rb', None)
- s> HTTP/1.1 200 OK\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Content-Type: application/mercurial-cbor\r\n
- s> Content-Length: *\r\n (glob)
- s> \r\n
- s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa4Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\
x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogNv1capabilitiesY\x01\xe4batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
- sending heads command
- s> setsockopt(6, 1, 1) -> None (?)
- s> POST /api/exp-http-v2-0003/ro/heads HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> accept: application/mercurial-exp-framing-0006\r\n
- s> content-type: application/mercurial-exp-framing-0006\r\n
- s> content-length: 56\r\n
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> user-agent: Mercurial debugwireproto\r\n
- s> \r\n
- s> \x1c\x00\x00\x01\x00\x01\x01\x82\xa1Pcontentencodings\x81Hidentity\x0c\x00\x00\x01\x00\x01\x00\x11\xa1DnameEheads
- s> makefile('rb', None)
- s> HTTP/1.1 200 OK\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Content-Type: application/mercurial-exp-framing-0006\r\n
- s> Transfer-Encoding: chunked\r\n
- s> \r\n
- s> 11\r\n
- s> \t\x00\x00\x01\x00\x02\x01\x92
- s> Hidentity
- s> \r\n
- s> 13\r\n
- s> \x0b\x00\x00\x01\x00\x02\x041
- s> \xa1FstatusBok
- s> \r\n
- s> 1e\r\n
- s> \x16\x00\x00\x01\x00\x02\x041
- s> \x81T\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00
- s> \r\n
- s> 8\r\n
- s> \x00\x00\x00\x01\x00\x02\x002
- s> \r\n
- s> 0\r\n
- s> \r\n
- response: [
- b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
- ]
- (sent 2 HTTP requests and * bytes; received * bytes in responses) (glob)
-
$ killdaemons.py
HTTP client follows HTTP redirect on handshake to new repo
@@ -442,9 +327,9 @@
s> Server: testing stub value\r\n
s> Date: $HTTP_DATE$\r\n
s> Content-Type: application/mercurial-0.1\r\n
- s> Content-Length: 484\r\n
+ s> Content-Length: \d+\\r\\n (re)
s> \r\n
- s> batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ s> batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset compression=\$BUNDLE2_COMPRESSIONS\$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (re)
Test with the HTTP peer
@@ -479,10 +364,10 @@
s> Server: testing stub value\r\n
s> Date: $HTTP_DATE$\r\n
s> Content-Type: application/mercurial-0.1\r\n
- s> Content-Length: 484\r\n
+ s> Content-Length: \d+\\r\\n (re)
s> \r\n
real URL is http://$LOCALIP:$HGPORT/redirected (glob)
- s> batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ s> batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset compression=\$BUNDLE2_COMPRESSIONS\$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (re)
sending heads command
s> setsockopt(6, 1, 1) -> None (?)
s> GET /redirected?cmd=heads HTTP/1.1\r\n
@@ -750,10 +635,10 @@
s> Server: testing stub value\r\n
s> Date: $HTTP_DATE$\r\n
s> Content-Type: application/mercurial-0.1\r\n
- s> Content-Length: 484\r\n
+ s> Content-Length: \d+\\r\\n (re)
s> \r\n
real URL is http://$LOCALIP:$HGPORT/redirected (glob)
- s> batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ s> batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset compression=\$BUNDLE2_COMPRESSIONS\$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (re)
sending heads command
s> setsockopt(6, 1, 1) -> None (?)
s> GET /redirected?cmd=heads HTTP/1.1\r\n
--- a/tests/test-import-bypass.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-import-bypass.t Fri Feb 18 14:27:43 2022 +0100
@@ -43,7 +43,7 @@
unable to find 'a' for patching
(use '--prefix' to apply patch relative to the current directory)
abort: patch failed to apply
- [255]
+ [20]
$ hg st
$ shortlog
o 1:4e322f7ce8e3 test 0 0 - foo - changea
@@ -234,7 +234,7 @@
patching file a
Hunk #1 FAILED at 0
abort: patch failed to apply
- [255]
+ [20]
$ hg --config patch.eol=auto import -d '0 0' -m 'test patch.eol' --bypass ../test.diff
applying ../test.diff
$ shortlog
--- a/tests/test-import-git.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-import-git.t Fri Feb 18 14:27:43 2022 +0100
@@ -519,7 +519,8 @@
> EOF
applying patch from stdin
abort: could not decode "binary2" binary patch: bad base85 character at position 6
- [255]
+ (check that whitespace in the patch has not been mangled)
+ [10]
$ hg revert -aq
$ hg import -d "1000000 0" -m rename-as-binary - <<"EOF"
@@ -534,7 +535,8 @@
> EOF
applying patch from stdin
abort: "binary2" length is 5 bytes, should be 6
- [255]
+ (check that whitespace in the patch has not been mangled)
+ [10]
$ hg revert -aq
$ hg import -d "1000000 0" -m rename-as-binary - <<"EOF"
@@ -548,7 +550,8 @@
> EOF
applying patch from stdin
abort: could not extract "binary2" binary data
- [255]
+ (check that whitespace in the patch has not been mangled)
+ [10]
Simulate a copy/paste turning LF into CRLF (issue2870)
@@ -748,7 +751,7 @@
> EOF
applying patch from stdin
abort: cannot create b: destination already exists
- [255]
+ [20]
$ cat b
b
@@ -768,7 +771,7 @@
cannot create b: destination already exists
1 out of 1 hunks FAILED -- saving rejects to file b.rej
abort: patch failed to apply
- [255]
+ [20]
$ cat b
b
@@ -791,7 +794,7 @@
Hunk #1 FAILED at 0
1 out of 1 hunks FAILED -- saving rejects to file linkb.rej
abort: patch failed to apply
- [255]
+ [20]
$ hg st
? b.rej
? linkb.rej
--- a/tests/test-import-unknown.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-import-unknown.t Fri Feb 18 14:27:43 2022 +0100
@@ -29,7 +29,7 @@
file added already exists
1 out of 1 hunks FAILED -- saving rejects to file added.rej
abort: patch failed to apply
- [255]
+ [20]
Test modifying an unknown file
@@ -41,7 +41,7 @@
$ hg import --no-commit ../unknown.diff
applying ../unknown.diff
abort: cannot patch changed: file is not tracked
- [255]
+ [20]
Test removing an unknown file
@@ -54,7 +54,7 @@
$ hg import --no-commit ../unknown.diff
applying ../unknown.diff
abort: cannot patch removed: file is not tracked
- [255]
+ [20]
Test copying onto an unknown file
@@ -64,6 +64,6 @@
$ hg import --no-commit ../unknown.diff
applying ../unknown.diff
abort: cannot create copied: destination already exists
- [255]
+ [20]
$ cd ..
--- a/tests/test-import.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-import.t Fri Feb 18 14:27:43 2022 +0100
@@ -234,7 +234,8 @@
$ hg --cwd b import -mpatch ../broken.patch
applying ../broken.patch
abort: bad hunk #1
- [255]
+ (check that whitespace in the patch has not been mangled)
+ [10]
$ rm -r b
hg -R repo import
@@ -834,7 +835,7 @@
Hunk #1 FAILED at 0
1 out of 1 hunks FAILED -- saving rejects to file a.rej
abort: patch failed to apply
- [255]
+ [20]
$ hg import --no-commit -v fuzzy-tip.patch
applying fuzzy-tip.patch
patching file a
@@ -853,7 +854,7 @@
Hunk #1 FAILED at 0
1 out of 1 hunks FAILED -- saving rejects to file a.rej
abort: patch failed to apply
- [255]
+ [20]
$ hg up -qC
$ hg import --config patch.fuzz=2 --exact fuzzy-reparent.patch
applying fuzzy-reparent.patch
@@ -1084,7 +1085,7 @@
> EOF
applying patch from stdin
abort: path contains illegal component: ../outside/foo
- [255]
+ [10]
$ cd ..
@@ -2054,7 +2055,7 @@
(use '--prefix' to apply patch relative to the current directory)
1 out of 1 hunks FAILED -- saving rejects to file file1.rej
abort: patch failed to apply
- [255]
+ [20]
test import crash (issue5375)
$ cd ..
@@ -2064,7 +2065,7 @@
applying patch from stdin
a not tracked!
abort: source file 'a' does not exist
- [255]
+ [20]
test immature end of hunk
@@ -2076,7 +2077,8 @@
> EOF
applying patch from stdin
abort: bad hunk #1: incomplete hunk
- [255]
+ (check that whitespace in the patch has not been mangled)
+ [10]
$ hg import - <<'EOF'
> diff --git a/foo b/foo
@@ -2087,4 +2089,5 @@
> EOF
applying patch from stdin
abort: bad hunk #1: incomplete hunk
- [255]
+ (check that whitespace in the patch has not been mangled)
+ [10]
--- a/tests/test-infinitepush-ci.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-infinitepush-ci.t Fri Feb 18 14:27:43 2022 +0100
@@ -204,7 +204,7 @@
$ hg pull -r b4e4bce660512ad3e71189e14588a70ac8e31fef
pulling from $TESTTMP/repo
abort: unknown revision 'b4e4bce660512ad3e71189e14588a70ac8e31fef'
- [255]
+ [10]
$ hg glog
o 1:6cb0989601f1 added a
| public
--- a/tests/test-inherit-mode.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-inherit-mode.t Fri Feb 18 14:27:43 2022 +0100
@@ -54,6 +54,7 @@
00770 ./.hg/cache/
00600 ./.hg/requires
00770 ./.hg/store/
+ 00600 ./.hg/store/requires
00770 ./.hg/wcache/
$ mkdir dir
@@ -92,6 +93,7 @@
00660 ./.hg/store/data/foo/index (reposimplestore !)
00660 ./.hg/store/fncache (repofncache !)
00660 ./.hg/store/phaseroots
+ 00600 ./.hg/store/requires
00660 ./.hg/store/undo
00660 ./.hg/store/undo.backupfiles
00660 ./.hg/store/undo.phaseroots
@@ -121,6 +123,7 @@
00770 ../push/.hg/cache/
00660 ../push/.hg/requires
00770 ../push/.hg/store/
+ 00660 ../push/.hg/store/requires
00770 ../push/.hg/wcache/
$ umask 077
@@ -152,6 +155,7 @@
00660 ../push/.hg/store/data/foo/b80de5d138758541c5f05265ad144ab9fa86d1db (reposimplestore !)
00660 ../push/.hg/store/data/foo/index (reposimplestore !)
00660 ../push/.hg/store/fncache (repofncache !)
+ 00660 ../push/.hg/store/requires
00660 ../push/.hg/store/undo
00660 ../push/.hg/store/undo.backupfiles
00660 ../push/.hg/store/undo.phaseroots
--- a/tests/test-init.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-init.t Fri Feb 18 14:27:43 2022 +0100
@@ -9,7 +9,7 @@
> if [ -f "$name"/.hg/00changelog.i ]; then
> echo 00changelog.i created
> fi
- > cat "$name"/.hg/requires
+ > hg debugrequires -R "$name"
> }
creating 'local'
@@ -25,6 +25,7 @@
persistent-nodemap (rust !)
revlog-compression-zstd (zstd !)
revlogv1
+ share-safe
sparserevlog
store
testonly-simplestore (reposimplestore !)
@@ -80,6 +81,7 @@
persistent-nodemap (rust !)
revlog-compression-zstd (zstd !)
revlogv1
+ share-safe
sparserevlog
store
testonly-simplestore (reposimplestore !)
@@ -96,6 +98,7 @@
persistent-nodemap (rust !)
revlog-compression-zstd (zstd !)
revlogv1
+ share-safe
sparserevlog
store
testonly-simplestore (reposimplestore !)
@@ -112,6 +115,7 @@
persistent-nodemap (rust !)
revlog-compression-zstd (zstd !)
revlogv1
+ share-safe
store
testonly-simplestore (reposimplestore !)
@@ -232,6 +236,7 @@
persistent-nodemap (rust !)
revlog-compression-zstd (zstd !)
revlogv1
+ share-safe
sparserevlog
store
testonly-simplestore (reposimplestore !)
@@ -255,6 +260,7 @@
persistent-nodemap (rust !)
revlog-compression-zstd (zstd !)
revlogv1
+ share-safe
sparserevlog
store
testonly-simplestore (reposimplestore !)
@@ -274,6 +280,7 @@
persistent-nodemap (rust !)
revlog-compression-zstd (zstd !)
revlogv1
+ share-safe
sparserevlog
store
testonly-simplestore (reposimplestore !)
--- a/tests/test-issue6528.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-issue6528.t Fri Feb 18 14:27:43 2022 +0100
@@ -187,6 +187,11 @@
#endif
Check that the issue is present
+(It is currently not present with rhg but will be when optimizations are added
+to resolve ambiguous files at the end of status without reading their content
+if the size differs, and reading the expected size without resolving filelog
+deltas where possible.)
+
$ hg st
M D.txt
M b.txt
--- a/tests/test-issue672.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-issue672.t Fri Feb 18 14:27:43 2022 +0100
@@ -65,7 +65,7 @@
ancestor: c64f439569a9, local: f4a9cff3cd0b+, remote: 746e9549ea96
starting 4 threads for background file closing (?)
preserving 1a for resolve of 1a
- 1a: local copied/moved from 1 -> m (premerge)
+ 1a: local copied/moved from 1 -> m
picked tool ':merge' for 1a (binary False symlink False changedelete False)
merging 1a and 1 to 1a
my 1a@f4a9cff3cd0b+ other 1@746e9549ea96 ancestor 1@c64f439569a9
@@ -89,7 +89,7 @@
starting 4 threads for background file closing (?)
preserving 1 for resolve of 1a
removing 1
- 1a: remote moved from 1 -> m (premerge)
+ 1a: remote moved from 1 -> m
picked tool ':merge' for 1a (binary False symlink False changedelete False)
merging 1 and 1a to 1a
my 1a@746e9549ea96+ other 1a@f4a9cff3cd0b ancestor 1@c64f439569a9
--- a/tests/test-largefiles-misc.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-largefiles-misc.t Fri Feb 18 14:27:43 2022 +0100
@@ -41,7 +41,7 @@
> EOF
$ hg config extensions
- \*\*\* failed to import extension largefiles from missing.py: [Errno *] $ENOENT$: 'missing.py' (glob)
+ \*\*\* failed to import extension "largefiles" from missing.py: [Errno *] $ENOENT$: 'missing.py' (glob)
abort: repository requires features unknown to this Mercurial: largefiles
(see https://mercurial-scm.org/wiki/MissingRequirement for more information)
[255]
@@ -267,7 +267,7 @@
getting changed largefiles
1 largefiles updated, 0 removed
3 files updated, 0 files merged, 0 files removed, 0 files unresolved
- $ grep largefiles ../shared_lfrepo/.hg/requires
+ $ hg debugrequires -R ../shared_lfrepo | grep largefiles
largefiles
verify that large files in subrepos handled properly
@@ -962,7 +962,7 @@
what do you want to do? l
getting changed largefiles
1 largefiles updated, 0 removed
- 0 files updated, 4 files merged, 0 files removed, 0 files unresolved
+ 1 files updated, 3 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)
$ cat f-different
1
--- a/tests/test-largefiles-update.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-largefiles-update.t Fri Feb 18 14:27:43 2022 +0100
@@ -68,20 +68,39 @@
A linear merge will update standins before performing the actual merge. It will
do a lfdirstate status walk and find 'unset'/'unsure' files, hash them, and
update the corresponding standins.
+
Verify that it actually marks the clean files as clean in lfdirstate so
we don't have to hash them again next time we update.
+# note:
+# We do this less aggressively now, to avoid race conditions, however the
+# cache
+# is properly set after the next status
+#
+# The "changed" output is marked as missing-correct-output/known-bad-output
+# for clarity
+
$ hg up
0 files updated, 0 files merged, 0 files removed, 0 files unresolved
updated to "f74e50bd9e55: #2"
1 other heads for branch "default"
$ hg debugdirstate --large --nodate
+ n 644 7 set large1 (missing-correct-output !)
+ n 644 13 set large2 (missing-correct-output !)
+ n 0 -1 unset large1 (known-bad-output !)
+ n 0 -1 unset large2 (known-bad-output !)
+ $ sleep 1 # so that mtime are not ambiguous
+ $ hg status
+ $ hg debugdirstate --large --nodate
n 644 7 set large1
n 644 13 set large2
Test that lfdirstate keeps track of last modification of largefiles and
prevents unnecessary hashing of content - also after linear/noop update
+(XXX Since there is a possible race during update, we only do this after the next
+status call, this is slower, but more correct)
+
$ sleep 1
$ hg st
$ hg debugdirstate --large --nodate
@@ -92,6 +111,13 @@
updated to "f74e50bd9e55: #2"
1 other heads for branch "default"
$ hg debugdirstate --large --nodate
+ n 644 7 set large1 (missing-correct-output !)
+ n 644 13 set large2 (missing-correct-output !)
+ n 0 -1 unset large1 (known-bad-output !)
+ n 0 -1 unset large2 (known-bad-output !)
+ $ sleep 1 # so that mtime are not ambiguous
+ $ hg status
+ $ hg debugdirstate --large --nodate
n 644 7 set large1
n 644 13 set large2
--- a/tests/test-largefiles-wireproto.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-largefiles-wireproto.t Fri Feb 18 14:27:43 2022 +0100
@@ -1,13 +1,3 @@
-#testcases sshv1 sshv2
-
-#if sshv2
- $ cat >> $HGRCPATH << EOF
- > [experimental]
- > sshpeer.advertise-v2 = true
- > sshserver.support-v2 = true
- > EOF
-#endif
-
This file contains testcases that tend to be related to the wire protocol part
of largefiles.
--- a/tests/test-lfconvert.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-lfconvert.t Fri Feb 18 14:27:43 2022 +0100
@@ -94,7 +94,7 @@
1276481102f218c981e0324180bafd9f sub/maybelarge.dat
"lfconvert" adds 'largefiles' to .hg/requires.
- $ cat .hg/requires
+ $ hg debugrequires
dotencode
dirstate-v2 (dirstate-v2 !)
fncache
@@ -103,6 +103,7 @@
persistent-nodemap (rust !)
revlog-compression-zstd (zstd !)
revlogv1
+ share-safe
sparserevlog
store
testonly-simplestore (reposimplestore !)
--- a/tests/test-lfs-largefiles.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-lfs-largefiles.t Fri Feb 18 14:27:43 2022 +0100
@@ -288,17 +288,8 @@
The requirement is added to the destination repo.
- $ cat .hg/requires
- dotencode
- dirstate-v2 (dirstate-v2 !)
- fncache
- generaldelta
+ $ hg debugrequires | grep lfs
lfs
- persistent-nodemap (rust !)
- revlog-compression-zstd (zstd !)
- revlogv1
- sparserevlog
- store
$ hg log -r 'all()' -G -T '{rev} {join(lfs_files, ", ")} ({desc})\n'
o 8 large_by_size.bin (remove large_by_size.bin)
@@ -345,7 +336,7 @@
breaks you can get 1048576 lines of +y in the output, which takes a looooooong
time to print.
$ hg diff -r 2:3 | head -n 20
- $ hg diff -r 2:6
+ $ hg diff -r 2:6 | head -n 20
diff -r e989d0fa3764 -r 752e3a0d8488 large.bin
--- a/large.bin Thu Jan 01 00:00:00 1970 +0000
+++ b/large.bin Thu Jan 01 00:00:00 1970 +0000
--- a/tests/test-lfs-serve.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-lfs-serve.t Fri Feb 18 14:27:43 2022 +0100
@@ -34,6 +34,7 @@
$ hg init server
$ SERVER_REQUIRES="$TESTTMP/server/.hg/requires"
+ $ SERVER_PATH="$TESTTMP/server/"
$ cat > $TESTTMP/debugprocessors.py <<EOF
> from mercurial import (
@@ -85,7 +86,9 @@
$ cat hg.pid >> $DAEMON_PIDS
$ hg clone -q http://localhost:$HGPORT client
- $ grep 'lfs' client/.hg/requires $SERVER_REQUIRES
+ $ hg debugrequires -R client | grep 'lfs'
+ [1]
+ $ hg debugrequires -R $SERVER_PATH | grep 'lfs'
[1]
This trivial repo will force commandserver to load the extension, but not call
@@ -129,24 +132,27 @@
+non-lfs
*** runcommand debugupgraderepo -q --run
- $ grep 'lfs' .hg/requires $SERVER_REQUIRES
+ $ hg debugrequires -R $SERVER_PATH | grep 'lfs'
[1]
#if lfsremote-on
$ hg push -q
- $ grep 'lfs' .hg/requires $SERVER_REQUIRES
+ $ hg debugrequires -R $SERVER_PATH | grep 'lfs'
[1]
$ hg clone -q http://localhost:$HGPORT $TESTTMP/client1_clone
- $ grep 'lfs' $TESTTMP/client1_clone/.hg/requires $SERVER_REQUIRES
+ $ hg debugrequires -R $TESTTMP/client1_clone/ | grep 'lfs'
+ [1]
+ $ hg debugrequires -R $SERVER_PATH | grep 'lfs'
[1]
$ hg init $TESTTMP/client1_pull
$ hg -R $TESTTMP/client1_pull pull -q http://localhost:$HGPORT
- $ grep 'lfs' $TESTTMP/client1_pull/.hg/requires $SERVER_REQUIRES
+ $ hg debugrequires -R $TESTTMP/client1_pull/ | grep 'lfs'
[1]
-
+ $ hg debugrequires -R $SERVER_PATH | grep 'lfs'
+ [1]
$ hg identify http://localhost:$HGPORT
d437e1d24fbd
@@ -167,16 +173,22 @@
extension is not enabled remotely.
$ hg push -q
- $ grep 'lfs' .hg/requires $SERVER_REQUIRES
+ $ hg debugrequires | grep 'lfs'
+ [1]
+ $ hg debugrequires -R $SERVER_PATH | grep 'lfs'
[1]
$ hg clone -q http://localhost:$HGPORT $TESTTMP/client2_clone
- $ grep 'lfs' $TESTTMP/client2_clone/.hg/requires $SERVER_REQUIRES
+ $ hg debugrequires -R $TESTTMP/client2_clone/ | grep 'lfs'
+ [1]
+ $ hg debugrequires -R $SERVER_PATH | grep 'lfs'
[1]
$ hg init $TESTTMP/client2_pull
$ hg -R $TESTTMP/client2_pull pull -q http://localhost:$HGPORT
- $ grep 'lfs' $TESTTMP/client2_pull/.hg/requires $SERVER_REQUIRES
+ $ hg debugrequires -R $TESTTMP/client2_pull/ | grep 'lfs'
+ [1]
+ $ hg debugrequires -R $SERVER_PATH | grep 'lfs'
[1]
$ hg identify http://localhost:$HGPORT
@@ -189,8 +201,10 @@
$ echo 'this is a big lfs file' > lfs.bin
$ hg ci -Aqm 'lfs'
- $ grep 'lfs' .hg/requires $SERVER_REQUIRES
- .hg/requires:lfs
+ $ hg debugrequires | grep 'lfs'
+ lfs
+ $ hg debugrequires -R $SERVER_PATH | grep 'lfs'
+ [1]
#if lfsremote-off
$ hg push -q
@@ -200,20 +214,24 @@
#else
$ hg push -q
#endif
- $ grep 'lfs' .hg/requires $SERVER_REQUIRES
- .hg/requires:lfs
- $TESTTMP/server/.hg/requires:lfs (lfsremote-on !)
+ $ hg debugrequires | grep 'lfs'
+ lfs
+ $ hg debugrequires -R $SERVER_PATH | grep 'lfs' || true
+ lfs (lfsremote-on !)
$ hg clone -q http://localhost:$HGPORT $TESTTMP/client3_clone
- $ grep 'lfs' $TESTTMP/client3_clone/.hg/requires $SERVER_REQUIRES || true
- $TESTTMP/client3_clone/.hg/requires:lfs (lfsremote-on !)
- $TESTTMP/server/.hg/requires:lfs (lfsremote-on !)
+
+ $ hg debugrequires -R $TESTTMP/client3_clone/ | grep 'lfs' || true
+ lfs (lfsremote-on !)
+ $ hg debugrequires -R $SERVER_PATH | grep 'lfs' || true
+ lfs (lfsremote-on !)
$ hg init $TESTTMP/client3_pull
$ hg -R $TESTTMP/client3_pull pull -q http://localhost:$HGPORT
- $ grep 'lfs' $TESTTMP/client3_pull/.hg/requires $SERVER_REQUIRES || true
- $TESTTMP/client3_pull/.hg/requires:lfs (lfsremote-on !)
- $TESTTMP/server/.hg/requires:lfs (lfsremote-on !)
+ $ hg debugrequires -R $TESTTMP/client3_pull/ | grep 'lfs' || true
+ lfs (lfsremote-on !)
+ $ hg debugrequires -R $SERVER_PATH | grep 'lfs' || true
+ lfs (lfsremote-on !)
Test that the commit/changegroup requirement check hook can be run multiple
times.
@@ -267,23 +285,24 @@
> EOF
$ echo 'non-lfs' > nonlfs2.txt
$ hg ci -Aqm 'non-lfs'
- $ grep 'lfs' .hg/requires $SERVER_REQUIRES
- $TESTTMP/server/.hg/requires:lfs
+ $ hg debugrequires -R $SERVER_PATH --config extensions.lfs= | grep 'lfs'
+ lfs
$ hg push -q --force
warning: repository is unrelated
- $ grep 'lfs' .hg/requires $SERVER_REQUIRES
- $TESTTMP/server/.hg/requires:lfs
+ $ hg debugrequires -R $SERVER_PATH --config extensions.lfs= | grep 'lfs'
+ lfs
$ hg clone http://localhost:$HGPORT $TESTTMP/client4_clone
(remote is using large file support (lfs), but it is explicitly disabled in the local configuration)
abort: repository requires features unknown to this Mercurial: lfs
(see https://mercurial-scm.org/wiki/MissingRequirement for more information)
[255]
- $ grep 'lfs' $TESTTMP/client4_clone/.hg/requires $SERVER_REQUIRES
- grep: $TESTTMP/client4_clone/.hg/requires: $ENOENT$
- $TESTTMP/server/.hg/requires:lfs
- [2]
+ $ hg debugrequires -R $TESTTMP/client4_clone/ | grep 'lfs'
+ abort: repository $TESTTMP/client4_clone/ not found
+ [1]
+ $ hg debugrequires -R $SERVER_PATH --config extensions.lfs= | grep 'lfs'
+ lfs
TODO: fail more gracefully.
@@ -294,8 +313,10 @@
remote: abort: no common changegroup version
abort: pull failed on remote
[100]
- $ grep 'lfs' $TESTTMP/client4_pull/.hg/requires $SERVER_REQUIRES
- $TESTTMP/server/.hg/requires:lfs
+ $ hg debugrequires -R $TESTTMP/client4_pull/ | grep 'lfs'
+ [1]
+ $ hg debugrequires -R $SERVER_PATH --config extensions.lfs= | grep 'lfs'
+ lfs
$ hg identify http://localhost:$HGPORT
03b080fa9d93
@@ -312,19 +333,21 @@
$ hg ci -Aqm 'non-lfs file with lfs client'
$ hg push -q
- $ grep 'lfs' .hg/requires $SERVER_REQUIRES
- $TESTTMP/server/.hg/requires:lfs
+ $ hg debugrequires -R $SERVER_PATH | grep 'lfs'
+ lfs
$ hg clone -q http://localhost:$HGPORT $TESTTMP/client5_clone
- $ grep 'lfs' $TESTTMP/client5_clone/.hg/requires $SERVER_REQUIRES
- $TESTTMP/client5_clone/.hg/requires:lfs
- $TESTTMP/server/.hg/requires:lfs
+ $ hg debugrequires -R $TESTTMP/client5_clone/ | grep 'lfs'
+ lfs
+ $ hg debugrequires -R $SERVER_PATH | grep 'lfs'
+ lfs
$ hg init $TESTTMP/client5_pull
$ hg -R $TESTTMP/client5_pull pull -q http://localhost:$HGPORT
- $ grep 'lfs' $TESTTMP/client5_pull/.hg/requires $SERVER_REQUIRES
- $TESTTMP/client5_pull/.hg/requires:lfs
- $TESTTMP/server/.hg/requires:lfs
+ $ hg debugrequires -R $TESTTMP/client5_pull/ | grep 'lfs'
+ lfs
+ $ hg debugrequires -R $SERVER_PATH | grep 'lfs'
+ lfs
$ hg identify http://localhost:$HGPORT
c729025cc5e3
@@ -463,14 +486,16 @@
remote: adding file changes
remote: added 1 changesets with 1 changes to 1 files
(sent 8 HTTP requests and * bytes; received * bytes in responses) (glob) (?)
- $ grep 'lfs' .hg/requires $SERVER_REQUIRES
- .hg/requires:lfs
- $TESTTMP/server/.hg/requires:lfs
+ $ hg debugrequires | grep lfs
+ lfs
+ $ hg debugrequires -R $SERVER_PATH | grep lfs
+ lfs
$ hg clone -q http://localhost:$HGPORT $TESTTMP/client6_clone
- $ grep 'lfs' $TESTTMP/client6_clone/.hg/requires $SERVER_REQUIRES
- $TESTTMP/client6_clone/.hg/requires:lfs
- $TESTTMP/server/.hg/requires:lfs
+ $ hg debugrequires -R $TESTTMP/client6_clone/ | grep lfs
+ lfs
+ $ hg debugrequires -R $SERVER_PATH | grep lfs
+ lfs
$ hg init $TESTTMP/client6_pull
$ hg -R $TESTTMP/client6_pull pull -u -v http://localhost:$HGPORT
@@ -495,9 +520,10 @@
updated to "d3b84d50eacb: lfs file with lfs client"
1 other heads for branch "default"
(sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
- $ grep 'lfs' $TESTTMP/client6_pull/.hg/requires $SERVER_REQUIRES
- $TESTTMP/client6_pull/.hg/requires:lfs
- $TESTTMP/server/.hg/requires:lfs
+ $ hg debugrequires -R $TESTTMP/client6_pull/ | grep lfs
+ lfs
+ $ hg debugrequires -R $SERVER_PATH | grep lfs
+ lfs
$ hg identify http://localhost:$HGPORT
d3b84d50eacb
--- a/tests/test-lfs.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-lfs.t Fri Feb 18 14:27:43 2022 +0100
@@ -40,7 +40,7 @@
> EOF
$ hg config extensions
- \*\*\* failed to import extension lfs from missing.py: [Errno *] $ENOENT$: 'missing.py' (glob)
+ \*\*\* failed to import extension "lfs" from missing.py: [Errno *] $ENOENT$: 'missing.py' (glob)
abort: repository requires features unknown to this Mercurial: lfs
(see https://mercurial-scm.org/wiki/MissingRequirement for more information)
[255]
@@ -75,10 +75,10 @@
# Commit large file
$ echo $LONG > largefile
- $ grep lfs .hg/requires
+ $ hg debugrequires | grep lfs
[1]
$ hg commit --traceback -Aqm "add large file"
- $ grep lfs .hg/requires
+ $ hg debugrequires | grep lfs
lfs
# Ensure metadata is stored
@@ -114,7 +114,7 @@
Push to a local non-lfs repo with the extension enabled will add the
lfs requirement
- $ grep lfs $TESTTMP/server/.hg/requires
+ $ hg debugrequires -R $TESTTMP/server/ | grep lfs
[1]
$ hg push -v | egrep -v '^(uncompressed| )'
pushing to $TESTTMP/server
@@ -126,7 +126,7 @@
adding file changes
calling hook pretxnchangegroup.lfs: hgext.lfs.checkrequireslfs
added 2 changesets with 3 changes to 3 files
- $ grep lfs $TESTTMP/server/.hg/requires
+ $ hg debugrequires -R $TESTTMP/server/ | grep lfs
lfs
# Unknown URL scheme
@@ -150,8 +150,9 @@
Pulling a local lfs repo into a local non-lfs repo with the extension
enabled adds the lfs requirement
- $ grep lfs .hg/requires $TESTTMP/server/.hg/requires
- $TESTTMP/server/.hg/requires:lfs
+ $ hg debugrequires | grep lfs || true
+ $ hg debugrequires -R $TESTTMP/server/ | grep lfs
+ lfs
$ hg pull default
pulling from $TESTTMP/server
requesting all changes
@@ -161,9 +162,10 @@
added 2 changesets with 3 changes to 3 files
new changesets 0ead593177f7:b88141481348
(run 'hg update' to get a working copy)
- $ grep lfs .hg/requires $TESTTMP/server/.hg/requires
- .hg/requires:lfs
- $TESTTMP/server/.hg/requires:lfs
+ $ hg debugrequires | grep lfs
+ lfs
+ $ hg debugrequires -R $TESTTMP/server/ | grep lfs
+ lfs
# Check the blobstore is not yet populated
$ [ -d .hg/store/lfs/objects ]
@@ -314,7 +316,7 @@
$ hg --config extensions.share= share repo7 sharedrepo
updating working directory
2 files updated, 0 files merged, 0 files removed, 0 files unresolved
- $ grep lfs sharedrepo/.hg/requires
+ $ hg debugrequires -R sharedrepo/ | grep lfs
lfs
# Test rename and status
@@ -1002,7 +1004,7 @@
2 a
1 b
0 meta
- $ grep 'lfs' convert_normal/.hg/requires
+ $ hg debugrequires -R convert_normal | grep 'lfs'
[1]
$ hg --cwd convert_normal cat a1 -r 0 -T '{rawdata}'
THIS-IS-LFS-BECAUSE-10-BYTES
@@ -1044,7 +1046,7 @@
1: a2: 5bb8341bee63b3649f222b2215bde37322bea075a30575aa685d8f8d21c77024
2: a2: 876dadc86a8542f9798048f2c47f51dbf8e4359aed883e8ec80c5db825f0d943
- $ grep 'lfs' convert_lfs/.hg/requires
+ $ hg debugrequires -R convert_lfs | grep 'lfs'
lfs
The hashes in all stages of the conversion are unchanged.
@@ -1075,7 +1077,7 @@
2 large to small, small to large
1 random modifications
0 switch large and small again
- $ grep 'lfs' convert_normal2/.hg/requires
+ $ hg debugrequires -R convert_normal2 | grep 'lfs'
[1]
$ hg --cwd convert_normal2 debugdata large 0
LONGER-THAN-TEN-BYTES-WILL-TRIGGER-LFS
@@ -1091,7 +1093,7 @@
2 large to small, small to large
1 random modifications
0 switch large and small again
- $ grep 'lfs' convert_lfs2/.hg/requires
+ $ hg debugrequires -R convert_lfs2 | grep 'lfs'
lfs
$ hg --cwd convert_lfs2 debugdata large 0
version https://git-lfs.github.com/spec/v1
@@ -1202,10 +1204,10 @@
$ hg bundle -R convert_lfs2 -qr tip --base null lfs.hg
$ hg init unbundle
$ hg pull -R unbundle -q nolfs.hg
- $ grep lfs unbundle/.hg/requires
+ $ hg debugrequires -R unbundle | grep lfs
[1]
$ hg pull -R unbundle -q lfs.hg
- $ grep lfs unbundle/.hg/requires
+ $ hg debugrequires -R unbundle | grep lfs
lfs
$ hg init no_lfs
@@ -1224,7 +1226,7 @@
pushing to no_lfs
abort: required features are not supported in the destination: lfs
[255]
- $ grep lfs no_lfs/.hg/requires
+ $ hg debugrequires -R no_lfs/ | grep lfs
[1]
Pulling from a local lfs repo to a local repo without an lfs requirement and
@@ -1234,5 +1236,5 @@
pulling from convert_lfs2
abort: required features are not supported in the destination: lfs
[255]
- $ grep lfs no_lfs2/.hg/requires
+ $ hg debugrequires -R no_lfs2/ | grep lfs
[1]
--- a/tests/test-log-bookmark.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-log-bookmark.t Fri Feb 18 14:27:43 2022 +0100
@@ -189,10 +189,10 @@
$ hg log -B unknown
abort: bookmark 'unknown' does not exist
- [255]
+ [10]
Shouldn't accept string-matcher syntax:
$ hg log -B 're:.*'
abort: bookmark 're:.*' does not exist
- [255]
+ [10]
--- a/tests/test-log-linerange.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-log-linerange.t Fri Feb 18 14:27:43 2022 +0100
@@ -1150,4 +1150,4 @@
$ hg ci -m 'remove baz' --quiet
$ hg log -f -L dir/baz,5:7 -p
abort: cannot follow file not in parent revision: "dir/baz"
- [255]
+ [20]
--- a/tests/test-log.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-log.t Fri Feb 18 14:27:43 2022 +0100
@@ -122,13 +122,13 @@
$ hg log -qfl1 '' inexistent
abort: cannot follow file not in parent revision: "inexistent"
- [255]
+ [20]
$ hg log -qfl1 . inexistent
abort: cannot follow file not in parent revision: "inexistent"
- [255]
+ [20]
$ hg log -qfl1 "`pwd`" inexistent
abort: cannot follow file not in parent revision: "inexistent"
- [255]
+ [20]
$ hg log -qfl1 '' e
4:7e4639b4691b
@@ -145,7 +145,7 @@
$ hg log -f dir
abort: cannot follow file not in parent revision: "dir"
- [255]
+ [20]
-f, directory
@@ -552,7 +552,7 @@
$ hg log -T '{rev}\n' -fr4 e x
abort: cannot follow file not in any of the specified revisions: "x"
- [255]
+ [20]
follow files from the specified revisions with directory patterns
(BROKEN: should follow copies from dir/b@2)
@@ -1417,7 +1417,7 @@
$ hg log -b 're:.*'
abort: unknown revision 're:.*'
- [255]
+ [10]
$ hg log -k 're:.*'
$ hg log -u 're:.*'
@@ -1544,7 +1544,7 @@
$ hg log -b dummy
abort: unknown revision 'dummy'
- [255]
+ [10]
log -b .
@@ -2422,7 +2422,7 @@
$ hg log -T '== {rev} ==\n' -fr'wdir()' --git --stat notfound
abort: cannot follow file not in any of the specified revisions: "notfound"
- [255]
+ [20]
follow files from wdir and non-wdir revision:
@@ -2435,15 +2435,15 @@
$ hg log -T '{rev}\n' -f d1/f2
abort: cannot follow nonexistent file: "d1/f2"
- [255]
+ [20]
$ hg log -T '{rev}\n' -f f1-copy
abort: cannot follow nonexistent file: "f1-copy"
- [255]
+ [20]
$ hg log -T '{rev}\n' -f .d6/f1
abort: cannot follow file not in parent revision: ".d6/f1"
- [255]
+ [20]
$ hg revert -aqC
--- a/tests/test-merge-changedelete.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-merge-changedelete.t Fri Feb 18 14:27:43 2022 +0100
@@ -680,12 +680,12 @@
changed
--- file3 ---
3
- <<<<<<< working copy: 13910f48cf7b - test: changed file1, removed file2, chan...
+ <<<<<<< working copy: 13910f48cf7b - test: changed file1, removed file2, c...
changed2
- ||||||| base
+ ||||||| common ancestor: ab57bf49aa27 - test: added files
=======
changed1
- >>>>>>> merge rev: 10f9a0a634e8 - test: removed file1, changed file2, chan...
+ >>>>>>> merge rev: 10f9a0a634e8 - test: removed file1, changed file2, c...
Exercise transitions between local, other, fail and prompt, and make sure the
dirstate stays consistent. (Compare with each other and to the above
--- a/tests/test-merge-commit.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-merge-commit.t Fri Feb 18 14:27:43 2022 +0100
@@ -72,7 +72,7 @@
ancestor: 0f2ff26688b9, local: 2263c1be0967+, remote: 0555950ead28
starting 4 threads for background file closing (?)
preserving bar for resolve of bar
- bar: versions differ -> m (premerge)
+ bar: versions differ -> m
picked tool ':merge' for bar (binary False symlink False changedelete False)
merging bar
my bar@2263c1be0967+ other bar@0555950ead28 ancestor bar@0f2ff26688b9
@@ -159,7 +159,7 @@
ancestor: 0f2ff26688b9, local: 2263c1be0967+, remote: 3ffa6b9e35f0
starting 4 threads for background file closing (?)
preserving bar for resolve of bar
- bar: versions differ -> m (premerge)
+ bar: versions differ -> m
picked tool ':merge' for bar (binary False symlink False changedelete False)
merging bar
my bar@2263c1be0967+ other bar@3ffa6b9e35f0 ancestor bar@0f2ff26688b9
--- a/tests/test-merge-criss-cross.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-merge-criss-cross.t Fri Feb 18 14:27:43 2022 +0100
@@ -93,13 +93,10 @@
f1: remote is newer -> g
getting f1
preserving f2 for resolve of f2
- f2: versions differ -> m (premerge)
+ f2: versions differ -> m
picked tool ':dump' for f2 (binary False symlink False changedelete False)
merging f2
my f2@3b08d01b0ab5+ other f2@adfe50279922 ancestor f2@0f6b37dbe527
- f2: versions differ -> m (merge)
- picked tool ':dump' for f2 (binary False symlink False changedelete False)
- my f2@3b08d01b0ab5+ other f2@adfe50279922 ancestor f2@0f6b37dbe527
1 files updated, 0 files merged, 0 files removed, 1 files unresolved
use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
[1]
--- a/tests/test-merge-exec.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-merge-exec.t Fri Feb 18 14:27:43 2022 +0100
@@ -4,7 +4,6 @@
#require execbit
-
Initial setup
==============
--- a/tests/test-merge-force.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-merge-force.t Fri Feb 18 14:27:43 2022 +0100
@@ -218,27 +218,27 @@
You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved.
What do you want to do? u
merging content1_content2_content1_content4-tracked
+ warning: conflicts while merging content1_content2_content1_content4-tracked! (edit, then use 'hg resolve --mark')
merging content1_content2_content2_content1-tracked
merging content1_content2_content2_content4-tracked
+ warning: conflicts while merging content1_content2_content2_content4-tracked! (edit, then use 'hg resolve --mark')
merging content1_content2_content3_content1-tracked
merging content1_content2_content3_content3-tracked
+ warning: conflicts while merging content1_content2_content3_content3-tracked! (edit, then use 'hg resolve --mark')
merging content1_content2_content3_content4-tracked
+ warning: conflicts while merging content1_content2_content3_content4-tracked! (edit, then use 'hg resolve --mark')
merging content1_content2_missing_content1-tracked
merging content1_content2_missing_content4-tracked
+ warning: conflicts while merging content1_content2_missing_content4-tracked! (edit, then use 'hg resolve --mark')
merging missing_content2_content2_content4-tracked
+ warning: conflicts while merging missing_content2_content2_content4-tracked! (edit, then use 'hg resolve --mark')
merging missing_content2_content3_content3-tracked
+ warning: conflicts while merging missing_content2_content3_content3-tracked! (edit, then use 'hg resolve --mark')
merging missing_content2_content3_content4-tracked
+ warning: conflicts while merging missing_content2_content3_content4-tracked! (edit, then use 'hg resolve --mark')
merging missing_content2_missing_content4-tracked
+ warning: conflicts while merging missing_content2_missing_content4-tracked! (edit, then use 'hg resolve --mark')
merging missing_content2_missing_content4-untracked
- warning: conflicts while merging content1_content2_content1_content4-tracked! (edit, then use 'hg resolve --mark')
- warning: conflicts while merging content1_content2_content2_content4-tracked! (edit, then use 'hg resolve --mark')
- warning: conflicts while merging content1_content2_content3_content3-tracked! (edit, then use 'hg resolve --mark')
- warning: conflicts while merging content1_content2_content3_content4-tracked! (edit, then use 'hg resolve --mark')
- warning: conflicts while merging content1_content2_missing_content4-tracked! (edit, then use 'hg resolve --mark')
- warning: conflicts while merging missing_content2_content2_content4-tracked! (edit, then use 'hg resolve --mark')
- warning: conflicts while merging missing_content2_content3_content3-tracked! (edit, then use 'hg resolve --mark')
- warning: conflicts while merging missing_content2_content3_content4-tracked! (edit, then use 'hg resolve --mark')
- warning: conflicts while merging missing_content2_missing_content4-tracked! (edit, then use 'hg resolve --mark')
warning: conflicts while merging missing_content2_missing_content4-untracked! (edit, then use 'hg resolve --mark')
18 files updated, 3 files merged, 8 files removed, 35 files unresolved
use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
@@ -398,13 +398,13 @@
content2
M content1_content2_content1_content4-tracked
- <<<<<<< working copy: 0447570f1af6 - test: local
+ <<<<<<< working copy: 0447570f1af6 - test: local
content4
- ||||||| base
+ ||||||| common ancestor: 8ef80617fa20 - test: base
content1
=======
content2
- >>>>>>> merge rev: 85100b8c675b - test: remote
+ >>>>>>> merge rev: 85100b8c675b - test: remote
M content1_content2_content1_content4-untracked
content2
@@ -428,13 +428,13 @@
content2
M content1_content2_content2_content4-tracked
- <<<<<<< working copy: 0447570f1af6 - test: local
+ <<<<<<< working copy: 0447570f1af6 - test: local
content4
- ||||||| base
+ ||||||| common ancestor: 8ef80617fa20 - test: base
content1
=======
content2
- >>>>>>> merge rev: 85100b8c675b - test: remote
+ >>>>>>> merge rev: 85100b8c675b - test: remote
M content1_content2_content2_content4-untracked
content2
@@ -458,25 +458,25 @@
content2
M content1_content2_content3_content3-tracked
- <<<<<<< working copy: 0447570f1af6 - test: local
+ <<<<<<< working copy: 0447570f1af6 - test: local
content3
- ||||||| base
+ ||||||| common ancestor: 8ef80617fa20 - test: base
content1
=======
content2
- >>>>>>> merge rev: 85100b8c675b - test: remote
+ >>>>>>> merge rev: 85100b8c675b - test: remote
M content1_content2_content3_content3-untracked
content2
M content1_content2_content3_content4-tracked
- <<<<<<< working copy: 0447570f1af6 - test: local
+ <<<<<<< working copy: 0447570f1af6 - test: local
content4
- ||||||| base
+ ||||||| common ancestor: 8ef80617fa20 - test: base
content1
=======
content2
- >>>>>>> merge rev: 85100b8c675b - test: remote
+ >>>>>>> merge rev: 85100b8c675b - test: remote
M content1_content2_content3_content4-untracked
content2
@@ -500,13 +500,13 @@
content2
M content1_content2_missing_content4-tracked
- <<<<<<< working copy: 0447570f1af6 - test: local
+ <<<<<<< working copy: 0447570f1af6 - test: local
content4
- ||||||| base
+ ||||||| common ancestor: 8ef80617fa20 - test: base
content1
=======
content2
- >>>>>>> merge rev: 85100b8c675b - test: remote
+ >>>>>>> merge rev: 85100b8c675b - test: remote
M content1_content2_missing_content4-untracked
content2
@@ -584,12 +584,12 @@
content2
M missing_content2_content2_content4-tracked
- <<<<<<< working copy: 0447570f1af6 - test: local
+ <<<<<<< working copy: 0447570f1af6 - test: local
content4
- ||||||| base
+ ||||||| common ancestor: 000000000000 - :
=======
content2
- >>>>>>> merge rev: 85100b8c675b - test: remote
+ >>>>>>> merge rev: 85100b8c675b - test: remote
M missing_content2_content2_content4-untracked
content2
@@ -607,23 +607,23 @@
content2
M missing_content2_content3_content3-tracked
- <<<<<<< working copy: 0447570f1af6 - test: local
+ <<<<<<< working copy: 0447570f1af6 - test: local
content3
- ||||||| base
+ ||||||| common ancestor: 000000000000 - :
=======
content2
- >>>>>>> merge rev: 85100b8c675b - test: remote
+ >>>>>>> merge rev: 85100b8c675b - test: remote
M missing_content2_content3_content3-untracked
content2
M missing_content2_content3_content4-tracked
- <<<<<<< working copy: 0447570f1af6 - test: local
+ <<<<<<< working copy: 0447570f1af6 - test: local
content4
- ||||||| base
+ ||||||| common ancestor: 000000000000 - :
=======
content2
- >>>>>>> merge rev: 85100b8c675b - test: remote
+ >>>>>>> merge rev: 85100b8c675b - test: remote
M missing_content2_content3_content4-untracked
content2
@@ -641,20 +641,20 @@
content2
M missing_content2_missing_content4-tracked
- <<<<<<< working copy: 0447570f1af6 - test: local
+ <<<<<<< working copy: 0447570f1af6 - test: local
content4
- ||||||| base
+ ||||||| common ancestor: 000000000000 - :
=======
content2
- >>>>>>> merge rev: 85100b8c675b - test: remote
+ >>>>>>> merge rev: 85100b8c675b - test: remote
M missing_content2_missing_content4-untracked
- <<<<<<< working copy: 0447570f1af6 - test: local
+ <<<<<<< working copy: 0447570f1af6 - test: local
content4
- ||||||| base
+ ||||||| common ancestor: 000000000000 - :
=======
content2
- >>>>>>> merge rev: 85100b8c675b - test: remote
+ >>>>>>> merge rev: 85100b8c675b - test: remote
M missing_content2_missing_missing-tracked
content2
@@ -735,6 +735,7 @@
You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved.
What do you want to do? u
merging content1_content2_content1_content4-tracked
+ warning: conflicts while merging content1_content2_content1_content4-tracked! (edit, then use 'hg resolve --mark')
file 'content1_content2_content1_content4-untracked' was deleted in local [working copy] but was modified in other [merge rev].
You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved.
What do you want to do? u
@@ -752,6 +753,7 @@
You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved.
What do you want to do? u
merging content1_content2_content2_content4-tracked
+ warning: conflicts while merging content1_content2_content2_content4-tracked! (edit, then use 'hg resolve --mark')
file 'content1_content2_content2_content4-untracked' was deleted in local [working copy] but was modified in other [merge rev].
You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved.
What do you want to do? u
@@ -769,10 +771,12 @@
You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved.
What do you want to do? u
merging content1_content2_content3_content3-tracked
+ warning: conflicts while merging content1_content2_content3_content3-tracked! (edit, then use 'hg resolve --mark')
file 'content1_content2_content3_content3-untracked' was deleted in local [working copy] but was modified in other [merge rev].
You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved.
What do you want to do? u
merging content1_content2_content3_content4-tracked
+ warning: conflicts while merging content1_content2_content3_content4-tracked! (edit, then use 'hg resolve --mark')
file 'content1_content2_content3_content4-untracked' was deleted in local [working copy] but was modified in other [merge rev].
You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved.
What do you want to do? u
@@ -790,6 +794,7 @@
You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved.
What do you want to do? u
merging content1_content2_missing_content4-tracked
+ warning: conflicts while merging content1_content2_missing_content4-tracked! (edit, then use 'hg resolve --mark')
file 'content1_content2_missing_content4-untracked' was deleted in local [working copy] but was modified in other [merge rev].
You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved.
What do you want to do? u
@@ -812,19 +817,14 @@
You can use (c)hanged version, (d)elete, or leave (u)nresolved.
What do you want to do? u
merging missing_content2_content2_content4-tracked
+ warning: conflicts while merging missing_content2_content2_content4-tracked! (edit, then use 'hg resolve --mark')
merging missing_content2_content3_content3-tracked
+ warning: conflicts while merging missing_content2_content3_content3-tracked! (edit, then use 'hg resolve --mark')
merging missing_content2_content3_content4-tracked
+ warning: conflicts while merging missing_content2_content3_content4-tracked! (edit, then use 'hg resolve --mark')
merging missing_content2_missing_content4-tracked
+ warning: conflicts while merging missing_content2_missing_content4-tracked! (edit, then use 'hg resolve --mark')
merging missing_content2_missing_content4-untracked
- warning: conflicts while merging content1_content2_content1_content4-tracked! (edit, then use 'hg resolve --mark')
- warning: conflicts while merging content1_content2_content2_content4-tracked! (edit, then use 'hg resolve --mark')
- warning: conflicts while merging content1_content2_content3_content3-tracked! (edit, then use 'hg resolve --mark')
- warning: conflicts while merging content1_content2_content3_content4-tracked! (edit, then use 'hg resolve --mark')
- warning: conflicts while merging content1_content2_missing_content4-tracked! (edit, then use 'hg resolve --mark')
- warning: conflicts while merging missing_content2_content2_content4-tracked! (edit, then use 'hg resolve --mark')
- warning: conflicts while merging missing_content2_content3_content3-tracked! (edit, then use 'hg resolve --mark')
- warning: conflicts while merging missing_content2_content3_content4-tracked! (edit, then use 'hg resolve --mark')
- warning: conflicts while merging missing_content2_missing_content4-tracked! (edit, then use 'hg resolve --mark')
warning: conflicts while merging missing_content2_missing_content4-untracked! (edit, then use 'hg resolve --mark')
[1]
$ checkstatus > $TESTTMP/status2 2>&1
--- a/tests/test-merge-halt.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-merge-halt.t Fri Feb 18 14:27:43 2022 +0100
@@ -24,8 +24,8 @@
$ hg rebase -s 1 -d 2 --tool false
rebasing 1:1f28a51c3c9b "c"
merging a
+ merging a failed!
merging b
- merging a failed!
merging b failed!
unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[240]
@@ -42,7 +42,6 @@
$ hg rebase -s 1 -d 2 --tool false
rebasing 1:1f28a51c3c9b "c"
merging a
- merging b
merging a failed!
unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[240]
@@ -67,9 +66,9 @@
> EOS
rebasing 1:1f28a51c3c9b "c"
merging a
- merging b
merging a failed!
continue merge operation (yn)? y
+ merging b
merging b failed!
continue merge operation (yn)? n
unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
@@ -94,9 +93,9 @@
> EOS
rebasing 1:1f28a51c3c9b "c"
merging a
- merging b
output file a appears unchanged
was merge successful (yn)? y
+ merging b
output file b appears unchanged
was merge successful (yn)? n
merging b failed!
@@ -122,7 +121,6 @@
$ hg rebase -s 1 -d 2 --tool true
rebasing 1:1f28a51c3c9b "c"
merging a
- merging b
merging a failed!
unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[240]
@@ -141,8 +139,8 @@
> EOS
rebasing 1:1f28a51c3c9b "c"
merging a
+ was merge of 'a' successful (yn)? y
merging b
- was merge of 'a' successful (yn)? y
was merge of 'b' successful (yn)? n
merging b failed!
unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
@@ -159,8 +157,8 @@
$ hg rebase -s 1 -d 2 --tool echo --keep --config merge-tools.echo.premerge=keep
rebasing 1:1f28a51c3c9b "c"
merging a
+ $TESTTMP/repo/a *a~base* *a~other* (glob)
merging b
- $TESTTMP/repo/a *a~base* *a~other* (glob)
$TESTTMP/repo/b *b~base* *b~other* (glob)
Check that unshelve isn't broken by halting the merge
@@ -187,7 +185,6 @@
unshelving change 'default'
rebasing shelved changes
merging shelve_file1
- merging shelve_file2
merging shelve_file1 failed!
unresolved conflicts (see 'hg resolve', then 'hg unshelve --continue')
[240]
@@ -195,7 +192,6 @@
M shelve_file1
M shelve_file2
? shelve_file1.orig
- ? shelve_file2.orig
# The repository is in an unfinished *unshelve* state.
# Unresolved merge conflicts:
@@ -210,7 +206,6 @@
$ hg resolve --tool false --all --re-merge
merging shelve_file1
- merging shelve_file2
merging shelve_file1 failed!
merge halted after failed merge (see hg resolve)
[240]
--- a/tests/test-merge-internal-tools-pattern.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-merge-internal-tools-pattern.t Fri Feb 18 14:27:43 2022 +0100
@@ -130,7 +130,7 @@
$ hg merge 3
merging f
- 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)
$ cat f
--- a/tests/test-merge-tools.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-merge-tools.t Fri Feb 18 14:27:43 2022 +0100
@@ -578,7 +578,6 @@
$ hg merge -r 2 --config merge-patterns.f=true --config merge-tools.true.executable=nonexistentmergetool
couldn't find merge tool true (for pattern f)
merging f
- couldn't find merge tool true (for pattern f)
merging f failed!
0 files updated, 0 files merged, 0 files removed, 1 files unresolved
use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
@@ -604,7 +603,6 @@
$ hg merge -r 2 --config merge-patterns.f=true --config merge-tools.true.executable=/nonexistent/mergetool
couldn't find merge tool true (for pattern f)
merging f
- couldn't find merge tool true (for pattern f)
merging f failed!
0 files updated, 0 files merged, 0 files removed, 1 files unresolved
use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
@@ -1225,15 +1223,15 @@
# hg update -C 1
$ hg merge -r 4 --config merge-tools.true.premerge=keep-merge3
merging f
- <<<<<<< working copy: ef83787e2614 - test: revision 1
+ <<<<<<< working copy: ef83787e2614 - test: revision 1
revision 1
space
- ||||||| base
+ ||||||| common ancestor: ffd2bda21d6e - test: revision 0
revision 0
space
=======
revision 4
- >>>>>>> merge rev: 81448d39c9a0 - test: revision 4
+ >>>>>>> merge rev: 81448d39c9a0 - test: revision 4
revision 0
space
revision 4
@@ -1241,15 +1239,15 @@
(branch merge, don't forget to commit)
$ aftermerge
# cat f
- <<<<<<< working copy: ef83787e2614 - test: revision 1
+ <<<<<<< working copy: ef83787e2614 - test: revision 1
revision 1
space
- ||||||| base
+ ||||||| common ancestor: ffd2bda21d6e - test: revision 0
revision 0
space
=======
revision 4
- >>>>>>> merge rev: 81448d39c9a0 - test: revision 4
+ >>>>>>> merge rev: 81448d39c9a0 - test: revision 4
# hg stat
M f
# hg resolve --list
@@ -1266,12 +1264,12 @@
$ hg merge -r 4 --config merge-tools.true.premerge=keep-mergediff
merging f
<<<<<<<
- ------- base
- +++++++ working copy: ef83787e2614 - test: revision 1
+ ------- common ancestor: ffd2bda21d6e - test: revision 0
+ +++++++ working copy: ef83787e2614 - test: revision 1
-revision 0
+revision 1
space
- ======= merge rev: 81448d39c9a0 - test: revision 4
+ ======= merge rev: 81448d39c9a0 - test: revision 4
revision 4
>>>>>>>
revision 0
@@ -1282,12 +1280,12 @@
$ aftermerge
# cat f
<<<<<<<
- ------- base
- +++++++ working copy: ef83787e2614 - test: revision 1
+ ------- common ancestor: ffd2bda21d6e - test: revision 0
+ +++++++ working copy: ef83787e2614 - test: revision 1
-revision 0
+revision 1
space
- ======= merge rev: 81448d39c9a0 - test: revision 4
+ ======= merge rev: 81448d39c9a0 - test: revision 4
revision 4
>>>>>>>
# hg stat
@@ -1582,7 +1580,7 @@
true.executable=cat
# hg update -C 1
$ cat <<EOF > printargs_merge_tool
- > while test \$# -gt 0; do echo arg: \"\$1\"; shift; done
+ > while test \$# -gt 0; do echo arg: \""\$1"\"; shift; done
> EOF
$ hg --config merge-tools.true.executable='sh' \
> --config merge-tools.true.args='./printargs_merge_tool ll:$labellocal lo: $labelother lb:$labelbase": "$base' \
@@ -1594,34 +1592,7 @@
arg: "ll:working copy"
arg: "lo:"
arg: "merge rev"
- arg: "lb:base: */f~base.*" (glob)
- 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
- (branch merge, don't forget to commit)
- $ rm -f 'printargs_merge_tool'
-
-Same test with experimental.mergetempdirprefix set:
-
- $ beforemerge
- [merge-tools]
- false.whatever=
- true.priority=1
- true.executable=cat
- # hg update -C 1
- $ cat <<EOF > printargs_merge_tool
- > while test \$# -gt 0; do echo arg: \"\$1\"; shift; done
- > EOF
- $ hg --config experimental.mergetempdirprefix=$TESTTMP/hgmerge. \
- > --config merge-tools.true.executable='sh' \
- > --config merge-tools.true.args='./printargs_merge_tool ll:$labellocal lo: $labelother lb:$labelbase": "$base' \
- > --config merge-tools.true.mergemarkertemplate='tooltmpl {short(node)}' \
- > --config ui.mergemarkertemplate='uitmpl {rev}' \
- > --config ui.mergemarkers=detailed \
- > merge -r 2
- merging f
- arg: "ll:working copy"
- arg: "lo:"
- arg: "merge rev"
- arg: "lb:base: */hgmerge.*/f~base" (glob)
+ arg: "lb:common ancestor: */f~base" (glob)
0 files updated, 1 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)
$ rm -f 'printargs_merge_tool'
@@ -1638,7 +1609,7 @@
true.executable=cat
# hg update -C 1
$ cat <<EOF > printargs_merge_tool
- > while test \$# -gt 0; do echo arg: \"\$1\"; shift; done
+ > while test \$# -gt 0; do echo arg: \""\$1"\"; shift; done
> EOF
$ hg --config merge-tools.true.executable='sh' \
> --config merge-tools.true.args='./printargs_merge_tool ll:$labellocal lo: $labelother lb:$labelbase": "$base' \
@@ -1651,7 +1622,7 @@
arg: "ll:working copy: tooltmpl ef83787e2614"
arg: "lo:"
arg: "merge rev: tooltmpl 0185f4e0cf02"
- arg: "lb:base: */f~base.*" (glob)
+ arg: "lb:common ancestor: */f~base" (glob)
0 files updated, 1 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)
$ rm -f 'printargs_merge_tool'
@@ -1666,9 +1637,9 @@
true.executable=cat
# hg update -C 1
$ cat <<EOF > mytool
- > echo labellocal: \"\$1\"
- > echo labelother: \"\$2\"
- > echo "output (arg)": \"\$3\"
+ > echo labellocal: \""\$1"\"
+ > echo labelother: \""\$2"\"
+ > echo "output (arg)": \""\$3"\"
> echo "output (contents)":
> cat "\$3"
> EOF
@@ -1704,9 +1675,9 @@
true.executable=cat
# hg update -C 1
$ cat <<EOF > mytool
- > echo labellocal: \"\$1\"
- > echo labelother: \"\$2\"
- > echo "output (arg)": \"\$3\"
+ > echo labellocal: \""\$1"\"
+ > echo labelother: \""\$2"\"
+ > echo "output (arg)": \""\$3"\"
> echo "output (contents)":
> cat "\$3"
> EOF
@@ -1837,7 +1808,6 @@
$ hg merge -y -r 2 --config ui.merge=missingbinary
couldn't find merge tool missingbinary (for pattern f)
merging f
- couldn't find merge tool missingbinary (for pattern f)
revision 1
space
revision 0
@@ -1898,23 +1868,7 @@
$ hg update -q -C 2
$ hg merge -y -r tip --tool echo --config merge-tools.echo.args='$base $local $other $output'
merging f and f.txt to f.txt
- */f~base.* */f~local.*.txt */f~other.*.txt $TESTTMP/repo/f.txt (glob)
- 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
- (branch merge, don't forget to commit)
-
-Verify naming of temporary files and that extension is preserved
-(experimental.mergetempdirprefix version):
-
- $ hg update -q -C 1
- $ hg mv f f.txt
- $ hg ci -qm "f.txt"
- warning: commit already existed in the repository!
- $ hg update -q -C 2
- $ hg merge -y -r tip --tool echo \
- > --config merge-tools.echo.args='$base $local $other $output' \
- > --config experimental.mergetempdirprefix=$TESTTMP/hgmerge.
- merging f and f.txt to f.txt
- $TESTTMP/hgmerge.*/f~base $TESTTMP/hgmerge.*/f~local.txt $TESTTMP/hgmerge.*/f~other.txt $TESTTMP/repo/f.txt (glob)
+ */hgmerge-*/f~base */hgmerge-*/f~local.txt */hgmerge-*/f~other.txt */repo/f.txt (glob)
0 files updated, 1 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)
@@ -2018,7 +1972,7 @@
Running merge tool for b ("*/bin/echo.exe"): (glob) (windows !)
Running merge tool for b (*/bin/echo): (glob) (no-windows !)
- local (working copy): 10:2d1f533d add binary file (#2) tip default
- - base (base): -1:00000000 default
+ - base (common ancestor): -1:00000000 default
- other (merge rev): 9:1e7ad7d7 add binary file (#1) default
merge runs here ...
0 files updated, 1 files merged, 0 files removed, 0 files unresolved
--- a/tests/test-merge-types.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-merge-types.t Fri Feb 18 14:27:43 2022 +0100
@@ -34,7 +34,7 @@
branchmerge: True, force: False, partial: False
ancestor: c334dc3be0da, local: 521a1e40188f+, remote: 3574f3e69b1c
preserving a for resolve of a
- a: versions differ -> m (premerge)
+ a: versions differ -> m
tool internal:merge (for pattern a) can't handle symlinks
couldn't find merge tool hgmerge
no tool found to merge a
@@ -68,7 +68,7 @@
branchmerge: True, force: False, partial: False
ancestor: c334dc3be0da, local: 3574f3e69b1c+, remote: 521a1e40188f
preserving a for resolve of a
- a: versions differ -> m (premerge)
+ a: versions differ -> m
picked tool ':union' for a (binary False symlink True changedelete False)
merging a
my a@3574f3e69b1c+ other a@521a1e40188f ancestor a@c334dc3be0da
@@ -90,7 +90,7 @@
branchmerge: True, force: False, partial: False
ancestor: c334dc3be0da, local: 3574f3e69b1c+, remote: 521a1e40188f
preserving a for resolve of a
- a: versions differ -> m (premerge)
+ a: versions differ -> m
picked tool ':merge3' for a (binary False symlink True changedelete False)
merging a
my a@3574f3e69b1c+ other a@521a1e40188f ancestor a@c334dc3be0da
@@ -112,7 +112,7 @@
branchmerge: True, force: False, partial: False
ancestor: c334dc3be0da, local: 3574f3e69b1c+, remote: 521a1e40188f
preserving a for resolve of a
- a: versions differ -> m (premerge)
+ a: versions differ -> m
picked tool ':merge-local' for a (binary False symlink True changedelete False)
merging a
my a@3574f3e69b1c+ other a@521a1e40188f ancestor a@c334dc3be0da
@@ -133,7 +133,7 @@
branchmerge: True, force: False, partial: False
ancestor: c334dc3be0da, local: 3574f3e69b1c+, remote: 521a1e40188f
preserving a for resolve of a
- a: versions differ -> m (premerge)
+ a: versions differ -> m
picked tool ':merge-other' for a (binary False symlink True changedelete False)
merging a
my a@3574f3e69b1c+ other a@521a1e40188f ancestor a@c334dc3be0da
@@ -166,7 +166,7 @@
branchmerge: False, force: False, partial: False
ancestor: c334dc3be0da, local: c334dc3be0da+, remote: 521a1e40188f
preserving a for resolve of a
- a: versions differ -> m (premerge)
+ a: versions differ -> m
(couldn't find merge tool hgmerge|tool hgmerge can't handle symlinks) (re)
no tool found to merge a
picked tool ':prompt' for a (binary False symlink True changedelete False)
@@ -343,9 +343,12 @@
$ hg merge
merging a
+ warning: conflicts while merging a! (edit, then use 'hg resolve --mark')
warning: cannot merge flags for b without common ancestor - keeping local flags
merging b
+ warning: conflicts while merging b! (edit, then use 'hg resolve --mark')
merging bx
+ warning: conflicts while merging bx! (edit, then use 'hg resolve --mark')
warning: cannot merge flags for c without common ancestor - keeping local flags
tool internal:merge (for pattern d) can't handle symlinks
no tool found to merge d
@@ -362,9 +365,6 @@
file 'h' needs to be resolved.
You can keep (l)ocal [working copy], take (o)ther [merge rev], or leave (u)nresolved.
What do you want to do? u
- warning: conflicts while merging a! (edit, then use 'hg resolve --mark')
- warning: conflicts while merging b! (edit, then use 'hg resolve --mark')
- warning: conflicts while merging bx! (edit, then use 'hg resolve --mark')
3 files updated, 0 files merged, 0 files removed, 6 files unresolved
use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
[1]
@@ -411,9 +411,12 @@
$ hg up -Cqr1
$ hg merge
merging a
+ warning: conflicts while merging a! (edit, then use 'hg resolve --mark')
warning: cannot merge flags for b without common ancestor - keeping local flags
merging b
+ warning: conflicts while merging b! (edit, then use 'hg resolve --mark')
merging bx
+ warning: conflicts while merging bx! (edit, then use 'hg resolve --mark')
warning: cannot merge flags for c without common ancestor - keeping local flags
tool internal:merge (for pattern d) can't handle symlinks
no tool found to merge d
@@ -430,9 +433,6 @@
file 'h' needs to be resolved.
You can keep (l)ocal [working copy], take (o)ther [merge rev], or leave (u)nresolved.
What do you want to do? u
- warning: conflicts while merging a! (edit, then use 'hg resolve --mark')
- warning: conflicts while merging b! (edit, then use 'hg resolve --mark')
- warning: conflicts while merging bx! (edit, then use 'hg resolve --mark')
3 files updated, 0 files merged, 0 files removed, 6 files unresolved
use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
[1]
--- a/tests/test-merge1.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-merge1.t Fri Feb 18 14:27:43 2022 +0100
@@ -349,6 +349,10 @@
aren't changed), even if none of mode, size and timestamp of them
isn't changed on the filesystem (see also issue4583).
+This test is now "best effort" as the mechanisms to prevent such races are
+getting better; it gets more complicated to test a specific scenario that would
+trigger it. If you see flakiness here, there is a race.
+
$ cat > $TESTTMP/abort.py <<EOF
> from __future__ import absolute_import
> # emulate aborting before "recordupdates()". in this case, files
@@ -365,13 +369,6 @@
> extensions.wrapfunction(merge, "applyupdates", applyupdates)
> EOF
- $ cat >> .hg/hgrc <<EOF
- > [fakedirstatewritetime]
- > # emulate invoking dirstate.write() via repo.status()
- > # at 2000-01-01 00:00
- > fakenow = 200001010000
- > EOF
-
(file gotten from other revision)
$ hg update -q -C 2
@@ -381,12 +378,8 @@
$ hg update -q -C 3
$ cat b
This is file b1
- $ touch -t 200001010000 b
- $ hg debugrebuildstate
-
$ cat >> .hg/hgrc <<EOF
> [extensions]
- > fakedirstatewritetime = $TESTDIR/fakedirstatewritetime.py
> abort = $TESTTMP/abort.py
> EOF
$ hg merge 5
@@ -394,13 +387,11 @@
[255]
$ cat >> .hg/hgrc <<EOF
> [extensions]
- > fakedirstatewritetime = !
> abort = !
> EOF
$ cat b
THIS IS FILE B5
- $ touch -t 200001010000 b
$ hg status -A b
M b
@@ -413,12 +404,10 @@
$ cat b
this is file b6
- $ touch -t 200001010000 b
- $ hg debugrebuildstate
+ $ hg status
$ cat >> .hg/hgrc <<EOF
> [extensions]
- > fakedirstatewritetime = $TESTDIR/fakedirstatewritetime.py
> abort = $TESTTMP/abort.py
> EOF
$ hg merge --tool internal:other 5
@@ -426,13 +415,11 @@
[255]
$ cat >> .hg/hgrc <<EOF
> [extensions]
- > fakedirstatewritetime = !
> abort = !
> EOF
$ cat b
THIS IS FILE B5
- $ touch -t 200001010000 b
$ hg status -A b
M b
--- a/tests/test-merge7.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-merge7.t Fri Feb 18 14:27:43 2022 +0100
@@ -86,13 +86,10 @@
ancestor: 96b70246a118, local: 50c3a7e29886+, remote: 40d11a4173a8
starting 4 threads for background file closing (?)
preserving test.txt for resolve of test.txt
- test.txt: versions differ -> m (premerge)
+ test.txt: versions differ -> m
picked tool ':merge' for test.txt (binary False symlink False changedelete False)
merging test.txt
my test.txt@50c3a7e29886+ other test.txt@40d11a4173a8 ancestor test.txt@96b70246a118
- test.txt: versions differ -> m (merge)
- picked tool ':merge' for test.txt (binary False symlink False changedelete False)
- my test.txt@50c3a7e29886+ other test.txt@40d11a4173a8 ancestor test.txt@96b70246a118
warning: conflicts while merging test.txt! (edit, then use 'hg resolve --mark')
0 files updated, 0 files merged, 0 files removed, 1 files unresolved
use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
--- a/tests/test-merge9.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-merge9.t Fri Feb 18 14:27:43 2022 +0100
@@ -27,8 +27,8 @@
test with the rename on the remote side
$ HGMERGE=false hg merge
merging bar
+ merging bar failed!
merging foo and baz to baz
- merging bar failed!
1 files updated, 1 files merged, 0 files removed, 1 files unresolved
use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
[1]
@@ -41,8 +41,8 @@
3 files updated, 0 files merged, 1 files removed, 0 files unresolved
$ HGMERGE=false hg merge
merging bar
+ merging bar failed!
merging baz and foo to baz
- merging bar failed!
1 files updated, 1 files merged, 0 files removed, 1 files unresolved
use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
[1]
--- a/tests/test-narrow-acl.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-narrow-acl.t Fri Feb 18 14:27:43 2022 +0100
@@ -34,7 +34,7 @@
f2
Requirements should contain narrowhg
- $ cat narrowclone1/.hg/requires | grep narrowhg
+ $ hg debugrequires -R narrowclone1 | grep narrowhg
narrowhg-experimental
NarrowHG should track f1 and f2
--- a/tests/test-narrow-clone-no-ellipsis.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-narrow-clone-no-ellipsis.t Fri Feb 18 14:27:43 2022 +0100
@@ -22,7 +22,7 @@
added 40 changesets with 1 changes to 1 files
new changesets *:* (glob)
$ cd narrow
- $ cat .hg/requires | grep -v generaldelta
+ $ hg debugrequires | grep -v generaldelta
dotencode
dirstate-v2 (dirstate-v2 !)
fncache
@@ -30,6 +30,7 @@
persistent-nodemap (rust !)
revlog-compression-zstd (zstd !)
revlogv1
+ share-safe
sparserevlog
store
testonly-simplestore (reposimplestore !)
--- a/tests/test-narrow-clone-stream.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-narrow-clone-stream.t Fri Feb 18 14:27:43 2022 +0100
@@ -61,7 +61,7 @@
Making sure we have the correct set of requirements
- $ cat .hg/requires
+ $ hg debugrequires
dotencode (tree !)
dotencode (flat-fncache !)
dirstate-v2 (dirstate-v2 !)
@@ -72,6 +72,7 @@
persistent-nodemap (rust !)
revlog-compression-zstd (zstd !)
revlogv1
+ share-safe
sparserevlog
store
treemanifest (tree !)
@@ -86,6 +87,7 @@
fncache (flat-fncache !)
meta (tree !)
narrowspec
+ requires
undo
undo.backupfiles
undo.narrowspec
--- a/tests/test-narrow-clone.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-narrow-clone.t Fri Feb 18 14:27:43 2022 +0100
@@ -38,7 +38,7 @@
added 3 changesets with 1 changes to 1 files
new changesets *:* (glob)
$ cd narrow
- $ cat .hg/requires | grep -v generaldelta
+ $ hg debugrequires | grep -v generaldelta
dotencode
dirstate-v2 (dirstate-v2 !)
fncache
@@ -46,6 +46,7 @@
persistent-nodemap (rust !)
revlog-compression-zstd (zstd !)
revlogv1
+ share-safe
sparserevlog
store
testonly-simplestore (reposimplestore !)
--- a/tests/test-narrow-expanddirstate.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-narrow-expanddirstate.t Fri Feb 18 14:27:43 2022 +0100
@@ -142,7 +142,7 @@
Hunk #1 FAILED at 0
1 out of 1 hunks FAILED -- saving rejects to file patchdir/f3.rej
abort: patch failed to apply
- [255]
+ [20]
$ hg tracked | grep patchdir
[1]
$ hg files | grep patchdir > /dev/null
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-merge-outside.t Fri Feb 18 14:27:43 2022 +0100
@@ -0,0 +1,111 @@
+===================================================================
+Test merge behavior with narrow for item outside of the narrow spec
+===================================================================
+
+This test currently checks for a simple "outside of narrow" merge case. I suspect
+there might be more corner cases that need testing, so consider extending these
+tests, or replacing them with a more "generative" version, comparing behavior with and without narrow.
+
+The feature is currently working with flat manifests only. This is the only
+case tested. Consider using test-cases if tree manifests start supporting this
+kind of merge.
+
+Create some initial setup
+
+ $ . "$TESTDIR/narrow-library.sh"
+
+ $ hg init server
+ $ echo root > server/root
+ $ mkdir server/inside
+ $ mkdir server/outside
+ $ echo babar > server/inside/inside-change
+ $ echo pom > server/outside/outside-changing
+ $ echo arthur > server/outside/outside-removed
+ $ hg -R server add server/
+ adding server/inside/inside-change
+ adding server/outside/outside-changing
+ adding server/outside/outside-removed
+ adding server/root
+ $ hg -R server commit -m root
+
+
+
+ $ hg clone ssh://user@dummy/server client --narrow --include inside
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 1 changes to 1 files
+ new changesets a0c415d360e5
+ updating to branch default
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+"trivial" change outside of narrow spec on the server
+
+ $ echo zephir > server/outside/outside-added
+ $ hg -R server add server/outside/outside-added
+ $ echo flore > server/outside/outside-changing
+ $ hg -R server remove server/outside/outside-removed
+ $ hg -R server commit -m "outside change"
+
+Merge them with some unrelated local change
+
+ $ echo celeste > client/inside/inside-change
+ $ hg -R client commit -m "inside change"
+ $ hg -R client pull
+ pulling from ssh://user@dummy/server
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 0 changes to 0 files (+1 heads)
+ new changesets f9ec5453023e
+ (run 'hg heads' to see heads, 'hg merge' to merge)
+ $ hg -R client merge
+ 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ hg -R client ci -m 'merge changes'
+ $ hg -R client push -r .
+ pushing to ssh://user@dummy/server
+ searching for changes
+ remote: adding changesets
+ remote: adding manifests
+ remote: adding file changes
+ remote: added 2 changesets with 1 changes to 1 files
+
+Checking result
+---------------
+
+general sanity check of all output
+
+ $ hg --repository server manifest --debug --rev 0
+ 360afd990eeff79e4a7f9f3ded5ecd7bc2fd3b59 644 inside/inside-change
+ 7db95ce5cd8e734ad12e3f5f37779a08070a1399 644 outside/outside-changing
+ 1591f6db41a30b68bd94ddccf4a4ce4f4fbe2a44 644 outside/outside-removed
+ 50ecbc31c0e82dd60c2747c434d1f11b85c0e178 644 root
+ $ hg --repository server manifest --debug --rev 1
+ 360afd990eeff79e4a7f9f3ded5ecd7bc2fd3b59 644 inside/inside-change
+ 486c008d6dddcaeb5e5f99556a121800cdcfb149 644 outside/outside-added
+ 153d7af5e4f53f44475bc0ff2b806c86f019eda4 644 outside/outside-changing
+ 50ecbc31c0e82dd60c2747c434d1f11b85c0e178 644 root
+
+ $ hg --repository server manifest --debug --rev 2
+ 1b3ab69c6c847abc8fd25537241fedcd4d188668 644 inside/inside-change
+ 7db95ce5cd8e734ad12e3f5f37779a08070a1399 644 outside/outside-changing
+ 1591f6db41a30b68bd94ddccf4a4ce4f4fbe2a44 644 outside/outside-removed
+ 50ecbc31c0e82dd60c2747c434d1f11b85c0e178 644 root
+ $ hg --repository server manifest --debug --rev 3
+ 1b3ab69c6c847abc8fd25537241fedcd4d188668 644 inside/inside-change
+ 486c008d6dddcaeb5e5f99556a121800cdcfb149 644 outside/outside-added
+ 153d7af5e4f53f44475bc0ff2b806c86f019eda4 644 outside/outside-changing
+ 50ecbc31c0e82dd60c2747c434d1f11b85c0e178 644 root
+
+The file changed outside should be changed by the merge
+
+ $ hg --repository server manifest --debug --rev 'desc("inside change")' | grep outside-changing
+ 7db95ce5cd8e734ad12e3f5f37779a08070a1399 644 outside/outside-changing
+
+ $ hg --repository server manifest --debug --rev 'desc("outside change")' | grep outside-changing
+ 153d7af5e4f53f44475bc0ff2b806c86f019eda4 644 outside/outside-changing
+ $ hg --repository server manifest --debug --rev 'desc("merge")' | grep outside-changing
+ 153d7af5e4f53f44475bc0ff2b806c86f019eda4 644 outside/outside-changing
--- a/tests/test-narrow-merge.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-narrow-merge.t Fri Feb 18 14:27:43 2022 +0100
@@ -83,12 +83,67 @@
TODO: Can merge non-conflicting changes outside narrow spec
$ hg update -q 'desc("modify inside/f1")'
+
+#if flat
+
$ hg merge 'desc("modify outside/f1")'
- abort: merge affects file 'outside/f1' outside narrow, which is not yet supported (flat !)
+ 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+
+status should be clean
+
+ $ hg status
+ ? inside/f1.orig
+
+file out of the spec should still not be in the dirstate at all
+
+ $ hg debugdirstate | grep outside/f1
+ [1]
+
+Commit that merge
+
+ $ hg ci -m 'merge from outside to inside'
+
+status should be clean
+
+ $ hg status
+ ? inside/f1.orig
+
+file out of the spec should not be in the mergestate anymore
+
+ $ hg debugmergestate | grep outside/f1
+ [1]
+
+file out of the spec should still not be in the dirstate at all
+
+ $ hg debugdirstate | grep outside/f1
+ [1]
+
+The filenode used should come from p2
+
+ $ hg manifest --debug --rev . | grep outside/f1
+ 83cd11431a3b2aff8a3995e5f27bcf33cdb5be98 644 outside/f1
+ $ hg manifest --debug --rev 'p1(.)' | grep outside/f1
+ c6b956c48be2cd4fa94be16002aba311143806fa 644 outside/f1
+ $ hg manifest --debug --rev 'p2(.)' | grep outside/f1
+ 83cd11431a3b2aff8a3995e5f27bcf33cdb5be98 644 outside/f1
+
+
+remove the commit to get in the previous situation again
+
+ $ hg debugstrip -r .
+ 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/48eb25338b19-a1bb8350-backup.hg
+
+#else
+
+ $ hg merge 'desc("modify outside/f1")'
abort: merge affects file 'outside/' outside narrow, which is not yet supported (tree !)
(merging in the other direction may work)
[255]
+#endif
+
$ hg update -q 'desc("modify outside/f1")'
$ hg merge 'desc("modify inside/f1")'
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
@@ -101,4 +156,4 @@
$ hg merge 'desc("conflicting outside/f1")'
abort: conflict in file 'outside/f1' is outside narrow clone (flat !)
abort: conflict in file 'outside/' is outside narrow clone (tree !)
- [255]
+ [20]
--- a/tests/test-narrow-rebase.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-narrow-rebase.t Fri Feb 18 14:27:43 2022 +0100
@@ -96,4 +96,4 @@
$ hg rebase -d 'desc("modify outside/f1")'
rebasing 4:707c035aadb6 "conflicting outside/f1"
abort: conflict in file 'outside/f1' is outside narrow clone
- [255]
+ [20]
--- a/tests/test-narrow-sparse.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-narrow-sparse.t Fri Feb 18 14:27:43 2022 +0100
@@ -56,7 +56,7 @@
$ test -f .hg/sparse
[1]
- $ cat .hg/requires
+ $ hg debugrequires
dotencode
dirstate-v2 (dirstate-v2 !)
fncache
@@ -65,6 +65,7 @@
persistent-nodemap (rust !)
revlog-compression-zstd (zstd !)
revlogv1
+ share-safe
sparserevlog
store
treemanifest (tree !)
--- a/tests/test-obsmarker-template.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-obsmarker-template.t Fri Feb 18 14:27:43 2022 +0100
@@ -1762,7 +1762,7 @@
2 new obsolescence markers
obsoleted 1 changesets
new changesets 7a230b46bf61 (1 drafts)
- (run 'hg heads' to see heads, 'hg merge' to merge)
+ (run 'hg heads' to see heads)
$ hg log --hidden -G
o changeset: 2:7a230b46bf61
| tag: tip
--- a/tests/test-obsolete-distributed.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-obsolete-distributed.t Fri Feb 18 14:27:43 2022 +0100
@@ -570,7 +570,7 @@
added 2 changesets with 0 changes to 2 files (+1 heads)
(2 other changesets obsolete on arrival)
abort: cannot update to target: filtered revision '6'
- [255]
+ [10]
$ cd ..
--- a/tests/test-obsolete-tag-cache.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-obsolete-tag-cache.t Fri Feb 18 14:27:43 2022 +0100
@@ -73,11 +73,11 @@
55482a6fb4b1881fa8f746fd52cf6f096bb21c89 test1
$ hg blackbox -l 5
- 1970/01/01 00:00:00 bob @2942a772f72a444bef4bef13874d515f50fa27b6 (5000)> tags
- 1970/01/01 00:00:00 bob @2942a772f72a444bef4bef13874d515f50fa27b6 (5000)> 2/2 cache hits/lookups in * seconds (glob)
- 1970/01/01 00:00:00 bob @2942a772f72a444bef4bef13874d515f50fa27b6 (5000)> writing .hg/cache/tags2-visible with 2 tags
- 1970/01/01 00:00:00 bob @2942a772f72a444bef4bef13874d515f50fa27b6 (5000)> tags exited 0 after * seconds (glob)
- 1970/01/01 00:00:00 bob @2942a772f72a444bef4bef13874d515f50fa27b6 (5000)> blackbox -l 5
+ 1970-01-01 00:00:00.000 bob @2942a772f72a444bef4bef13874d515f50fa27b6 (5000)> tags
+ 1970-01-01 00:00:00.000 bob @2942a772f72a444bef4bef13874d515f50fa27b6 (5000)> 2/2 cache hits/lookups in * seconds (glob)
+ 1970-01-01 00:00:00.000 bob @2942a772f72a444bef4bef13874d515f50fa27b6 (5000)> writing .hg/cache/tags2-visible with 2 tags
+ 1970-01-01 00:00:00.000 bob @2942a772f72a444bef4bef13874d515f50fa27b6 (5000)> tags exited 0 after * seconds (glob)
+ 1970-01-01 00:00:00.000 bob @2942a772f72a444bef4bef13874d515f50fa27b6 (5000)> blackbox -l 5
Hiding another changeset should cause the filtered hash to change
@@ -97,11 +97,11 @@
042eb6bfcc4909bad84a1cbf6eb1ddf0ab587d41 head2
$ hg blackbox -l 5
- 1970/01/01 00:00:00 bob @2942a772f72a444bef4bef13874d515f50fa27b6 (5000)> tags
- 1970/01/01 00:00:00 bob @2942a772f72a444bef4bef13874d515f50fa27b6 (5000)> 1/1 cache hits/lookups in * seconds (glob)
- 1970/01/01 00:00:00 bob @2942a772f72a444bef4bef13874d515f50fa27b6 (5000)> writing .hg/cache/tags2-visible with 1 tags
- 1970/01/01 00:00:00 bob @2942a772f72a444bef4bef13874d515f50fa27b6 (5000)> tags exited 0 after * seconds (glob)
- 1970/01/01 00:00:00 bob @2942a772f72a444bef4bef13874d515f50fa27b6 (5000)> blackbox -l 5
+ 1970-01-01 00:00:00.000 bob @2942a772f72a444bef4bef13874d515f50fa27b6 (5000)> tags
+ 1970-01-01 00:00:00.000 bob @2942a772f72a444bef4bef13874d515f50fa27b6 (5000)> 1/1 cache hits/lookups in * seconds (glob)
+ 1970-01-01 00:00:00.000 bob @2942a772f72a444bef4bef13874d515f50fa27b6 (5000)> writing .hg/cache/tags2-visible with 1 tags
+ 1970-01-01 00:00:00.000 bob @2942a772f72a444bef4bef13874d515f50fa27b6 (5000)> tags exited 0 after * seconds (glob)
+ 1970-01-01 00:00:00.000 bob @2942a772f72a444bef4bef13874d515f50fa27b6 (5000)> blackbox -l 5
Resolving tags on an unfiltered repo writes a separate tags cache
@@ -118,8 +118,8 @@
d75775ffbc6bca1794d300f5571272879bd280da test2
$ hg blackbox -l 5
- 1970/01/01 00:00:00 bob @2942a772f72a444bef4bef13874d515f50fa27b6 (5000)> --hidden tags
- 1970/01/01 00:00:00 bob @2942a772f72a444bef4bef13874d515f50fa27b6 (5000)> 2/2 cache hits/lookups in * seconds (glob)
- 1970/01/01 00:00:00 bob @2942a772f72a444bef4bef13874d515f50fa27b6 (5000)> writing .hg/cache/tags2 with 3 tags
- 1970/01/01 00:00:00 bob @2942a772f72a444bef4bef13874d515f50fa27b6 (5000)> --hidden tags exited 0 after * seconds (glob)
- 1970/01/01 00:00:00 bob @2942a772f72a444bef4bef13874d515f50fa27b6 (5000)> blackbox -l 5
+ 1970-01-01 00:00:00.000 bob @2942a772f72a444bef4bef13874d515f50fa27b6 (5000)> --hidden tags
+ 1970-01-01 00:00:00.000 bob @2942a772f72a444bef4bef13874d515f50fa27b6 (5000)> 2/2 cache hits/lookups in * seconds (glob)
+ 1970-01-01 00:00:00.000 bob @2942a772f72a444bef4bef13874d515f50fa27b6 (5000)> writing .hg/cache/tags2 with 3 tags
+ 1970-01-01 00:00:00.000 bob @2942a772f72a444bef4bef13874d515f50fa27b6 (5000)> --hidden tags exited 0 after * seconds (glob)
+ 1970-01-01 00:00:00.000 bob @2942a772f72a444bef4bef13874d515f50fa27b6 (5000)> blackbox -l 5
--- a/tests/test-obsolete.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-obsolete.t Fri Feb 18 14:27:43 2022 +0100
@@ -169,10 +169,6 @@
5:5601fb93a350 (draft) [tip ] add new_3_c
$ hg heads --hidden
5:5601fb93a350 (draft) [tip ] add new_3_c
- 4:ca819180edb9 (draft *obsolete*) [ ] add new_2_c [rewritten as 5:5601fb93a350]
- 3:cdbce2fbb163 (draft *obsolete*) [ ] add new_c [rewritten as 4:ca819180edb9]
- 2:245bde4270cd (draft *obsolete*) [ ] add original_c [rewritten as 3:cdbce2fbb163]
-
check that summary does not report them
@@ -193,7 +189,7 @@
add new_3_c
branch: default
commit: (clean)
- update: 3 new changesets, 4 branch heads (merge)
+ update: (current)
phases: 6 draft
remote: 3 outgoing
--- a/tests/test-parseindex2.py Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-parseindex2.py Fri Feb 18 14:27:43 2022 +0100
@@ -57,6 +57,7 @@
0,
constants.COMP_MODE_INLINE,
constants.COMP_MODE_INLINE,
+ constants.RANK_UNKNOWN,
)
nodemap[e[7]] = n
append(e)
@@ -72,6 +73,7 @@
0,
constants.COMP_MODE_INLINE,
constants.COMP_MODE_INLINE,
+ constants.RANK_UNKNOWN,
)
nodemap[e[7]] = n
append(e)
@@ -132,8 +134,8 @@
)
-def parse_index2(data, inline, revlogv2=False):
- index, chunkcache = parsers.parse_index2(data, inline, revlogv2=revlogv2)
+def parse_index2(data, inline, format=constants.REVLOGV1):
+ index, chunkcache = parsers.parse_index2(data, inline, format=format)
return list(index), chunkcache
@@ -268,6 +270,7 @@
0,
constants.COMP_MODE_INLINE,
constants.COMP_MODE_INLINE,
+ constants.RANK_UNKNOWN,
)
index, junk = parsers.parse_index2(data_inlined, True)
got = index[-1]
@@ -303,6 +306,7 @@
0,
constants.COMP_MODE_INLINE,
constants.COMP_MODE_INLINE,
+ constants.RANK_UNKNOWN,
)
index.append(e)
--- a/tests/test-permissions.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-permissions.t Fri Feb 18 14:27:43 2022 +0100
@@ -78,7 +78,7 @@
(fsmonitor makes "hg status" avoid accessing to "dir")
$ hg status
- dir: Permission denied
+ dir: Permission denied* (glob)
M a
#endif
--- a/tests/test-persistent-nodemap.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-persistent-nodemap.t Fri Feb 18 14:27:43 2022 +0100
@@ -65,6 +65,7 @@
format-variant repo
fncache: yes
dirstate-v2: no
+ tracked-hint: no
dotencode: yes
generaldelta: yes
share-safe: yes
@@ -782,9 +783,10 @@
format-variant repo config default
fncache: yes yes yes
dirstate-v2: no no no
+ tracked-hint: no no no
dotencode: yes yes yes
generaldelta: yes yes yes
- share-safe: yes yes no
+ share-safe: yes yes yes
sparserevlog: yes yes yes
persistent-nodemap: yes no no
copies-sdc: no no no
@@ -809,8 +811,6 @@
- manifest
$ ls -1 .hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
- 00changelog-*.nd (glob)
- 00manifest-*.nd (glob)
undo.backup.00changelog.n
undo.backup.00manifest.n
$ hg debugnodemap --metadata
@@ -826,9 +826,10 @@
format-variant repo config default
fncache: yes yes yes
dirstate-v2: no no no
+ tracked-hint: no no no
dotencode: yes yes yes
generaldelta: yes yes yes
- share-safe: yes yes no
+ share-safe: yes yes yes
sparserevlog: yes yes yes
persistent-nodemap: no yes no
copies-sdc: no no no
--- a/tests/test-phases.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-phases.t Fri Feb 18 14:27:43 2022 +0100
@@ -882,16 +882,8 @@
$ hg init no-internal-phase --config format.internal-phase=no
$ cd no-internal-phase
- $ cat .hg/requires
- dotencode
- dirstate-v2 (dirstate-v2 !)
- fncache
- generaldelta
- persistent-nodemap (rust !)
- revlog-compression-zstd (zstd !)
- revlogv1
- sparserevlog
- store
+ $ hg debugrequires | grep internal-phase
+ [1]
$ echo X > X
$ hg add X
$ hg status
@@ -911,17 +903,8 @@
$ hg init internal-phase --config format.internal-phase=yes
$ cd internal-phase
- $ cat .hg/requires
- dotencode
- dirstate-v2 (dirstate-v2 !)
- fncache
- generaldelta
+ $ hg debugrequires | grep internal-phase
internal-phase
- persistent-nodemap (rust !)
- revlog-compression-zstd (zstd !)
- revlogv1
- sparserevlog
- store
$ mkcommit A
test-debug-phase: new rev 0: x -> 1
test-hook-close-phase: 4a2df7238c3b48766b5e22fafbb8a2f506ec8256: -> draft
--- a/tests/test-pull-network.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-pull-network.t Fri Feb 18 14:27:43 2022 +0100
@@ -1,15 +1,5 @@
#require serve
-#testcases sshv1 sshv2
-
-#if sshv2
- $ cat >> $HGRCPATH << EOF
- > [experimental]
- > sshpeer.advertise-v2 = true
- > sshserver.support-v2 = true
- > EOF
-#endif
-
$ hg init test
$ cd test
--- a/tests/test-pull-r.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-pull-r.t Fri Feb 18 14:27:43 2022 +0100
@@ -112,7 +112,7 @@
$ hg pull -qr missing ../repo
abort: unknown revision 'missing'
- [255]
+ [10]
Pull multiple revisions with update:
--- a/tests/test-purge.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-purge.t Fri Feb 18 14:27:43 2022 +0100
@@ -29,7 +29,7 @@
$ hg st
$ touch foo
$ hg purge
- permanently delete 1 unkown files? (yN) n
+ permanently delete 1 unknown files? (yN) n
abort: removal cancelled
[250]
$ hg st
@@ -93,7 +93,7 @@
untracked_file
untracked_file_readonly
$ hg purge --confirm
- permanently delete 2 unkown files? (yN) n
+ permanently delete 2 unknown files? (yN) n
abort: removal cancelled
[250]
$ hg purge -v
@@ -156,7 +156,7 @@
$ hg purge -p ../untracked_directory
untracked_directory/nested_directory
$ hg purge --confirm
- permanently delete 1 unkown files? (yN) n
+ permanently delete 1 unknown files? (yN) n
abort: removal cancelled
[250]
$ hg purge -v ../untracked_directory
@@ -203,7 +203,7 @@
ignored
untracked_file
$ hg purge --confirm --all
- permanently delete 1 unkown and 1 ignored files? (yN) n
+ permanently delete 1 unknown and 1 ignored files? (yN) n
abort: removal cancelled
[250]
$ hg purge -v --all
--- a/tests/test-qrecord.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-qrecord.t Fri Feb 18 14:27:43 2022 +0100
@@ -117,7 +117,7 @@
$ echo "mq=nonexistent" >> $HGRCPATH
$ hg help qrecord
- *** failed to import extension mq from nonexistent: [Errno *] * (glob)
+ *** failed to import extension "mq" from nonexistent: [Errno *] * (glob)
hg qrecord [OPTION]... PATCH [FILE]...
interactively record a new patch
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-rank.t Fri Feb 18 14:27:43 2022 +0100
@@ -0,0 +1,223 @@
+=============================================================
+Check that we can compute and exchange revision rank properly
+=============================================================
+
+ $ cat << EOF >> $HGRCPATH
+ > [format]
+ > exp-use-changelog-v2=enable-unstable-format-and-corrupt-my-data
+ > EOF
+
+
+Test minimal rank computation with merge
+
+ $ hg init rank-repo-minimal
+ $ cd rank-repo-minimal
+ $ touch 0
+ $ hg commit -Aqm 0
+ $ touch 1
+ $ hg commit -Aqm 1
+ $ hg update -qr 0
+ $ touch 2
+ $ hg commit -Aqm 2
+ $ hg merge -qr 1
+ $ hg commit -m 3
+ $ touch 4
+ $ hg commit -Aqm 4
+ $ hg log --graph --template '{rev} {_fast_rank}\n'
+ @ 4 5
+ |
+ o 3 4
+ |\
+ | o 2 2
+ | |
+ o | 1 2
+ |/
+ o 0 1
+
+ $ cd ..
+
+
+Build a bigger example repo
+
+ $ hg init rank-repo-generated
+ $ cd rank-repo-generated
+ $ hg debugbuilddag '.:root1+5:mp1<root1+10:mp2/mp1+3<mp1+2:mp3/mp2$+15/mp1+4'
+ $ hg log -G -T '{desc}'
+ o r42
+ |
+ o r41
+ |
+ o r40
+ |
+ o r39
+ |
+ o r38
+ |\
+ | o r37
+ | |
+ | o r36
+ | |
+ | o r35
+ | |
+ | o r34
+ | |
+ | o r33
+ | |
+ | o r32
+ | |
+ | o r31
+ | |
+ | o r30
+ | |
+ | o r29
+ | |
+ | o r28
+ | |
+ | o r27
+ | |
+ | o r26
+ | |
+ | o r25
+ | |
+ | o r24
+ | |
+ | o r23
+ |
+ | o r22
+ | |\
+ | | o r21
+ | | |
+ +---o r20
+ | |
+ | | o r19
+ | | |
+ | | o r18
+ | | |
+ | | o r17
+ | | |
+ +---o r16
+ | |/
+ | o r15
+ | |
+ | o r14
+ | |
+ | o r13
+ | |
+ | o r12
+ | |
+ | o r11
+ | |
+ | o r10
+ | |
+ | o r9
+ | |
+ | o r8
+ | |
+ | o r7
+ | |
+ | o r6
+ | |
+ o | r5
+ | |
+ o | r4
+ | |
+ o | r3
+ | |
+ o | r2
+ | |
+ o | r1
+ |/
+ o r0
+
+
+
+Check the rank
+--------------
+
+ $ hg log -G -T '{_fast_rank}'
+ o 26
+ |
+ o 25
+ |
+ o 24
+ |
+ o 23
+ |
+ o 22
+ |\
+ | o 15
+ | |
+ | o 14
+ | |
+ | o 13
+ | |
+ | o 12
+ | |
+ | o 11
+ | |
+ | o 10
+ | |
+ | o 9
+ | |
+ | o 8
+ | |
+ | o 7
+ | |
+ | o 6
+ | |
+ | o 5
+ | |
+ | o 4
+ | |
+ | o 3
+ | |
+ | o 2
+ | |
+ | o 1
+ |
+ | o 19
+ | |\
+ | | o 8
+ | | |
+ +---o 7
+ | |
+ | | o 20
+ | | |
+ | | o 19
+ | | |
+ | | o 18
+ | | |
+ +---o 17
+ | |/
+ | o 11
+ | |
+ | o 10
+ | |
+ | o 9
+ | |
+ | o 8
+ | |
+ | o 7
+ | |
+ | o 6
+ | |
+ | o 5
+ | |
+ | o 4
+ | |
+ | o 3
+ | |
+ | o 2
+ | |
+ o | 6
+ | |
+ o | 5
+ | |
+ o | 4
+ | |
+ o | 3
+ | |
+ o | 2
+ |/
+ o 1
+
--- a/tests/test-rebase-collapse.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-rebase-collapse.t Fri Feb 18 14:27:43 2022 +0100
@@ -717,13 +717,13 @@
o 0: 4a2df7238c3b 'A'
$ cat A
- <<<<<<< dest: 82b8abf9c185 D - test: D
+ <<<<<<< dest: 82b8abf9c185 D - test: D
D
- ||||||| base
+ ||||||| parent of source: 4a2df7238c3b A - test: A
A
=======
B
- >>>>>>> source: f899f3910ce7 B - test: B
+ >>>>>>> source: f899f3910ce7 B - test: B
$ echo BC > A
$ hg resolve -m
(no more unresolved files)
@@ -745,13 +745,13 @@
o 0: 4a2df7238c3b 'A'
$ cat A
- <<<<<<< dest: 82b8abf9c185 D - test: D
+ <<<<<<< dest: 82b8abf9c185 D - test: D
BC
- ||||||| base
+ ||||||| parent of source: f899f3910ce7 B - test: B
B
=======
C
- >>>>>>> source: 63668d570d21 C tip - test: C
+ >>>>>>> source: 63668d570d21 C tip - test: C
$ echo BD > A
$ hg resolve -m
(no more unresolved files)
--- a/tests/test-rebase-conflicts.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-rebase-conflicts.t Fri Feb 18 14:27:43 2022 +0100
@@ -372,13 +372,13 @@
+++ b/a * (glob)
@@ -1,2 +1,8 @@
a
- +<<<<<<< dest: 328e4ab1f7cc ab - test: ab
+ +<<<<<<< dest: 328e4ab1f7cc ab - test: ab
b
- +||||||| base
+ +||||||| parent of source: cb9a9f314b8b - test: a
+=======
+b
+c
- +>>>>>>> source: 7bc217434fc1 - test: abc
+ +>>>>>>> source: 7bc217434fc1 - test: abc
Test rebase with obsstore turned on and off (issue5606)
--- a/tests/test-rebase-obsolete.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-rebase-obsolete.t Fri Feb 18 14:27:43 2022 +0100
@@ -650,6 +650,7 @@
$ hg add J
$ hg commit -m J
1 new orphan changesets
+ created new head
$ hg debugobsolete `hg log --rev . -T '{node}'`
1 new obsolescence markers
obsoleted 1 changesets
--- a/tests/test-rebase-obsolete2.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-rebase-obsolete2.t Fri Feb 18 14:27:43 2022 +0100
@@ -47,6 +47,7 @@
$ hg add C
$ hg commit -m C
1 new orphan changesets
+ created new head
$ hg log -G
@ 4:212cb178bcbb C
|
@@ -73,6 +74,7 @@
$ hg add D
$ hg commit -m D
1 new orphan changesets
+ created new head
$ hg --hidden strip -r 'desc(B1)'
saved backup bundle to $TESTTMP/obsskip/.hg/strip-backup/86f6414ccda7-b1c452ee-backup.hg
1 new orphan changesets
@@ -194,6 +196,7 @@
$ hg add foo
$ hg commit -m "bar foo"
1 new orphan changesets
+ created new head
$ hg log -G
@ 14:73568ab6879d bar foo
|
--- a/tests/test-rebase-obsolete4.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-rebase-obsolete4.t Fri Feb 18 14:27:43 2022 +0100
@@ -169,7 +169,7 @@
D
branch: default
commit: 1 modified, 1 added, 1 unknown, 1 unresolved
- update: 1 new changesets, 2 branch heads (merge)
+ update: (current)
phases: 3 draft
rebase: 0 rebased, 2 remaining (rebase --continue)
--- a/tests/test-rebase-scenario-global.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-rebase-scenario-global.t Fri Feb 18 14:27:43 2022 +0100
@@ -380,7 +380,7 @@
$ hg --config format.usegeneraldelta=no init issue5678
$ cd issue5678
- $ grep generaldelta .hg/requires
+ $ hg debugrequires | grep generaldelta
[1]
$ echo a > a
$ hg ci -Aqm a
--- a/tests/test-rebuildstate.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-rebuildstate.t Fri Feb 18 14:27:43 2022 +0100
@@ -79,6 +79,7 @@
$ touch foo bar qux
$ hg add qux
$ hg remove bar
+ $ sleep 1 # remove potential ambiguity in mtime
$ hg status -A
A qux
R bar
@@ -106,6 +107,7 @@
$ hg manifest
bar
foo
+ $ sleep 1 # remove potential ambiguity in mtime
$ hg status -A
A qux
R bar
--- a/tests/test-remotefilelog-bundle2-legacy.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-remotefilelog-bundle2-legacy.t Fri Feb 18 14:27:43 2022 +0100
@@ -40,7 +40,7 @@
> EOF
$ hg init master
- $ grep generaldelta master/.hg/requires
+ $ hg debugrequires -R master | grep generaldelta
generaldelta
$ cd master
preferuncompressed = False so that we can make both generaldelta and non-generaldelta clones
--- a/tests/test-remotefilelog-bundle2.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-remotefilelog-bundle2.t Fri Feb 18 14:27:43 2022 +0100
@@ -3,7 +3,7 @@
$ . "$TESTDIR/remotefilelog-library.sh"
$ hg init master
- $ grep generaldelta master/.hg/requires
+ $ hg debugrequires -R master | grep generaldelta
generaldelta
$ cd master
preferuncompressed = False so that we can make both generaldelta and non-generaldelta clones
@@ -22,10 +22,10 @@
$ hgcloneshallow ssh://user@dummy/master shallow-generaldelta -q --pull --config experimental.bundle2-exp=True
1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
- $ grep generaldelta shallow-generaldelta/.hg/requires
+ $ hg debugrequires -R shallow-generaldelta/ | grep generaldelta
generaldelta
$ hgcloneshallow ssh://user@dummy/master shallow-plain -q --pull --config format.usegeneraldelta=False --config format.generaldelta=False --config experimental.bundle2-exp=True
- $ grep generaldelta shallow-plain/.hg/requires
+ $ hg debugrequires -R shallow-plain/ | grep generaldelta
[1]
$ cd master
--- a/tests/test-remotefilelog-clone-tree.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-remotefilelog-clone-tree.t Fri Feb 18 14:27:43 2022 +0100
@@ -25,7 +25,7 @@
searching for changes
no changes found
$ cd shallow
- $ cat .hg/requires
+ $ hg debugrequires
dotencode
dirstate-v2 (dirstate-v2 !)
exp-remotefilelog-repo-req-1
@@ -34,6 +34,7 @@
persistent-nodemap (rust !)
revlog-compression-zstd (zstd !)
revlogv1
+ share-safe
sparserevlog
store
treemanifest
@@ -69,7 +70,7 @@
searching for changes
no changes found
$ cd shallow2
- $ cat .hg/requires
+ $ hg debugrequires
dotencode
dirstate-v2 (dirstate-v2 !)
exp-remotefilelog-repo-req-1
@@ -78,6 +79,7 @@
persistent-nodemap (rust !)
revlog-compression-zstd (zstd !)
revlogv1
+ share-safe
sparserevlog
store
treemanifest
@@ -113,7 +115,7 @@
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ ls shallow3/.hg/store/data
- $ cat shallow3/.hg/requires
+ $ hg debugrequires -R shallow3/
dotencode
dirstate-v2 (dirstate-v2 !)
exp-remotefilelog-repo-req-1
@@ -122,6 +124,7 @@
persistent-nodemap (rust !)
revlog-compression-zstd (zstd !)
revlogv1
+ share-safe
sparserevlog
store
treemanifest
--- a/tests/test-remotefilelog-clone.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-remotefilelog-clone.t Fri Feb 18 14:27:43 2022 +0100
@@ -22,7 +22,7 @@
searching for changes
no changes found
$ cd shallow
- $ cat .hg/requires
+ $ hg debugrequires
dotencode
dirstate-v2 (dirstate-v2 !)
exp-remotefilelog-repo-req-1
@@ -31,6 +31,7 @@
persistent-nodemap (rust !)
revlog-compression-zstd (zstd !)
revlogv1
+ share-safe
sparserevlog
store
@@ -59,7 +60,7 @@
searching for changes
no changes found
$ cd shallow2
- $ cat .hg/requires
+ $ hg debugrequires
dotencode
dirstate-v2 (dirstate-v2 !)
exp-remotefilelog-repo-req-1
@@ -68,6 +69,7 @@
persistent-nodemap (rust !)
revlog-compression-zstd (zstd !)
revlogv1
+ share-safe
sparserevlog
store
$ ls .hg/store/data
@@ -111,7 +113,7 @@
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ ls shallow3/.hg/store/data
- $ cat shallow3/.hg/requires
+ $ hg debugrequires -R shallow3/
dotencode
dirstate-v2 (dirstate-v2 !)
exp-remotefilelog-repo-req-1
@@ -120,5 +122,6 @@
persistent-nodemap (rust !)
revlog-compression-zstd (zstd !)
revlogv1
+ share-safe
sparserevlog
store
--- a/tests/test-remotefilelog-log.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-remotefilelog-log.t Fri Feb 18 14:27:43 2022 +0100
@@ -25,7 +25,7 @@
searching for changes
no changes found
$ cd shallow
- $ cat .hg/requires
+ $ hg debugrequires
dotencode
dirstate-v2 (dirstate-v2 !)
exp-remotefilelog-repo-req-1
@@ -34,6 +34,7 @@
persistent-nodemap (rust !)
revlog-compression-zstd (zstd !)
revlogv1
+ share-safe
sparserevlog
store
--- a/tests/test-remotefilelog-repack.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-remotefilelog-repack.t Fri Feb 18 14:27:43 2022 +0100
@@ -307,7 +307,7 @@
1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
$ hg prefetch -r 38
abort: unknown revision '38'
- [255]
+ [10]
$ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack
-r--r--r-- 70 052643fdcdebbd42d7c180a651a30d46098e6fe1.datapack
$ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack
--- a/tests/test-rename-merge1.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-rename-merge1.t Fri Feb 18 14:27:43 2022 +0100
@@ -44,7 +44,7 @@
getting b2
preserving a for resolve of b
removing a
- b: remote moved from a -> m (premerge)
+ b: remote moved from a -> m
picked tool ':merge' for b (binary False symlink False changedelete False)
merging a and b to b
my b@044f8520aeeb+ other b@85c198ef2f6c ancestor a@af1939970a1c
@@ -218,7 +218,7 @@
ancestor: 5151c134577e, local: 07fcbc9a74ed+, remote: f21419739508
starting 4 threads for background file closing (?)
preserving z for resolve of z
- z: both renamed from y -> m (premerge)
+ z: both renamed from y -> m
picked tool ':merge3' for z (binary False symlink False changedelete False)
merging z
my z@07fcbc9a74ed+ other z@f21419739508 ancestor y@5151c134577e
--- a/tests/test-rename-merge2.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-rename-merge2.t Fri Feb 18 14:27:43 2022 +0100
@@ -88,18 +88,15 @@
starting 4 threads for background file closing (?)
preserving a for resolve of b
preserving rev for resolve of rev
- b: remote copied from a -> m (premerge)
+ b: remote copied from a -> m
picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
merging a and b to b
my b@e300d1c794ec+ other b@4ce40f5aca24 ancestor a@924404dff337
premerge successful
- rev: versions differ -> m (premerge)
+ rev: versions differ -> m
picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
merging rev
my rev@e300d1c794ec+ other rev@4ce40f5aca24 ancestor rev@924404dff337
- rev: versions differ -> m (merge)
- picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
- my rev@e300d1c794ec+ other rev@4ce40f5aca24 ancestor rev@924404dff337
launching merge tool: * ../merge *$TESTTMP/t/t/rev* * * (glob)
merge tool returned: 0
0 files updated, 2 files merged, 0 files removed, 0 files unresolved
@@ -128,18 +125,15 @@
getting a
preserving b for resolve of b
preserving rev for resolve of rev
- b: local copied/moved from a -> m (premerge)
+ b: local copied/moved from a -> m
picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
merging b and a to b
my b@86a2aa42fc76+ other a@f4db7e329e71 ancestor a@924404dff337
premerge successful
- rev: versions differ -> m (premerge)
+ rev: versions differ -> m
picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
merging rev
my rev@86a2aa42fc76+ other rev@f4db7e329e71 ancestor rev@924404dff337
- rev: versions differ -> m (merge)
- picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
- my rev@86a2aa42fc76+ other rev@f4db7e329e71 ancestor rev@924404dff337
launching merge tool: * ../merge *$TESTTMP/t/t/rev* * * (glob)
merge tool returned: 0
1 files updated, 2 files merged, 0 files removed, 0 files unresolved
@@ -168,18 +162,15 @@
preserving a for resolve of b
preserving rev for resolve of rev
removing a
- b: remote moved from a -> m (premerge)
+ b: remote moved from a -> m
picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
merging a and b to b
my b@e300d1c794ec+ other b@bdb19105162a ancestor a@924404dff337
premerge successful
- rev: versions differ -> m (premerge)
+ rev: versions differ -> m
picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
merging rev
my rev@e300d1c794ec+ other rev@bdb19105162a ancestor rev@924404dff337
- rev: versions differ -> m (merge)
- picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
- my rev@e300d1c794ec+ other rev@bdb19105162a ancestor rev@924404dff337
launching merge tool: * ../merge *$TESTTMP/t/t/rev* * * (glob)
merge tool returned: 0
0 files updated, 2 files merged, 0 files removed, 0 files unresolved
@@ -206,18 +197,15 @@
starting 4 threads for background file closing (?)
preserving b for resolve of b
preserving rev for resolve of rev
- b: local copied/moved from a -> m (premerge)
+ b: local copied/moved from a -> m
picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
merging b and a to b
my b@02963e448370+ other a@f4db7e329e71 ancestor a@924404dff337
premerge successful
- rev: versions differ -> m (premerge)
+ rev: versions differ -> m
picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
merging rev
my rev@02963e448370+ other rev@f4db7e329e71 ancestor rev@924404dff337
- rev: versions differ -> m (merge)
- picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
- my rev@02963e448370+ other rev@f4db7e329e71 ancestor rev@924404dff337
launching merge tool: * ../merge *$TESTTMP/t/t/rev* * * (glob)
merge tool returned: 0
0 files updated, 2 files merged, 0 files removed, 0 files unresolved
@@ -244,13 +232,10 @@
b: remote created -> g
getting b
preserving rev for resolve of rev
- rev: versions differ -> m (premerge)
+ rev: versions differ -> m
picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
merging rev
my rev@94b33a1b7f2d+ other rev@4ce40f5aca24 ancestor rev@924404dff337
- rev: versions differ -> m (merge)
- picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
- my rev@94b33a1b7f2d+ other rev@4ce40f5aca24 ancestor rev@924404dff337
launching merge tool: * ../merge *$TESTTMP/t/t/rev* * * (glob)
merge tool returned: 0
1 files updated, 1 files merged, 0 files removed, 0 files unresolved
@@ -276,13 +261,10 @@
ancestor: 924404dff337, local: 86a2aa42fc76+, remote: 97c705ade336
starting 4 threads for background file closing (?)
preserving rev for resolve of rev
- rev: versions differ -> m (premerge)
+ rev: versions differ -> m
picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
merging rev
my rev@86a2aa42fc76+ other rev@97c705ade336 ancestor rev@924404dff337
- rev: versions differ -> m (merge)
- picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
- my rev@86a2aa42fc76+ other rev@97c705ade336 ancestor rev@924404dff337
launching merge tool: * ../merge *$TESTTMP/t/t/rev* * * (glob)
merge tool returned: 0
0 files updated, 1 files merged, 0 files removed, 0 files unresolved
@@ -311,13 +293,10 @@
b: remote created -> g
getting b
preserving rev for resolve of rev
- rev: versions differ -> m (premerge)
+ rev: versions differ -> m
picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
merging rev
my rev@94b33a1b7f2d+ other rev@bdb19105162a ancestor rev@924404dff337
- rev: versions differ -> m (merge)
- picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
- my rev@94b33a1b7f2d+ other rev@bdb19105162a ancestor rev@924404dff337
launching merge tool: * ../merge *$TESTTMP/t/t/rev* * * (glob)
merge tool returned: 0
1 files updated, 1 files merged, 1 files removed, 0 files unresolved
@@ -342,13 +321,10 @@
ancestor: 924404dff337, local: 02963e448370+, remote: 97c705ade336
starting 4 threads for background file closing (?)
preserving rev for resolve of rev
- rev: versions differ -> m (premerge)
+ rev: versions differ -> m
picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
merging rev
my rev@02963e448370+ other rev@97c705ade336 ancestor rev@924404dff337
- rev: versions differ -> m (merge)
- picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
- my rev@02963e448370+ other rev@97c705ade336 ancestor rev@924404dff337
launching merge tool: * ../merge *$TESTTMP/t/t/rev* * * (glob)
merge tool returned: 0
0 files updated, 1 files merged, 0 files removed, 0 files unresolved
@@ -374,22 +350,16 @@
starting 4 threads for background file closing (?)
preserving b for resolve of b
preserving rev for resolve of rev
- b: both renamed from a -> m (premerge)
+ b: both renamed from a -> m
picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
merging b
my b@62e7bf090eba+ other b@49b6d8032493 ancestor a@924404dff337
- rev: versions differ -> m (premerge)
+ launching merge tool: * ../merge *$TESTTMP/t/t/b* * * (glob)
+ merge tool returned: 0
+ rev: versions differ -> m
picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
merging rev
my rev@62e7bf090eba+ other rev@49b6d8032493 ancestor rev@924404dff337
- b: both renamed from a -> m (merge)
- picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
- my b@62e7bf090eba+ other b@49b6d8032493 ancestor a@924404dff337
- launching merge tool: * ../merge *$TESTTMP/t/t/b* * * (glob)
- merge tool returned: 0
- rev: versions differ -> m (merge)
- picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
- my rev@62e7bf090eba+ other rev@49b6d8032493 ancestor rev@924404dff337
launching merge tool: * ../merge *$TESTTMP/t/t/rev* * * (glob)
merge tool returned: 0
0 files updated, 2 files merged, 0 files removed, 0 files unresolved
@@ -425,13 +395,10 @@
c: remote created -> g
getting c
preserving rev for resolve of rev
- rev: versions differ -> m (premerge)
+ rev: versions differ -> m
picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
merging rev
my rev@02963e448370+ other rev@fe905ef2c33e ancestor rev@924404dff337
- rev: versions differ -> m (merge)
- picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
- my rev@02963e448370+ other rev@fe905ef2c33e ancestor rev@924404dff337
launching merge tool: * ../merge *$TESTTMP/t/t/rev* * * (glob)
merge tool returned: 0
1 files updated, 1 files merged, 0 files removed, 0 files unresolved
@@ -456,22 +423,16 @@
starting 4 threads for background file closing (?)
preserving b for resolve of b
preserving rev for resolve of rev
- b: both created -> m (premerge)
+ b: both created -> m
picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
merging b
my b@86a2aa42fc76+ other b@af30c7647fc7 ancestor b@000000000000
- rev: versions differ -> m (premerge)
+ launching merge tool: * ../merge *$TESTTMP/t/t/b* * * (glob)
+ merge tool returned: 0
+ rev: versions differ -> m
picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
merging rev
my rev@86a2aa42fc76+ other rev@af30c7647fc7 ancestor rev@924404dff337
- b: both created -> m (merge)
- picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
- my b@86a2aa42fc76+ other b@af30c7647fc7 ancestor b@000000000000
- launching merge tool: * ../merge *$TESTTMP/t/t/b* * * (glob)
- merge tool returned: 0
- rev: versions differ -> m (merge)
- picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
- my rev@86a2aa42fc76+ other rev@af30c7647fc7 ancestor rev@924404dff337
launching merge tool: * ../merge *$TESTTMP/t/t/rev* * * (glob)
merge tool returned: 0
0 files updated, 2 files merged, 0 files removed, 0 files unresolved
@@ -498,22 +459,16 @@
starting 4 threads for background file closing (?)
preserving b for resolve of b
preserving rev for resolve of rev
- b: both created -> m (premerge)
+ b: both created -> m
picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
merging b
my b@59318016310c+ other b@bdb19105162a ancestor b@000000000000
- rev: versions differ -> m (premerge)
+ launching merge tool: * ../merge *$TESTTMP/t/t/b* * * (glob)
+ merge tool returned: 0
+ rev: versions differ -> m
picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
merging rev
my rev@59318016310c+ other rev@bdb19105162a ancestor rev@924404dff337
- b: both created -> m (merge)
- picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
- my b@59318016310c+ other b@bdb19105162a ancestor b@000000000000
- launching merge tool: * ../merge *$TESTTMP/t/t/b* * * (glob)
- merge tool returned: 0
- rev: versions differ -> m (merge)
- picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
- my rev@59318016310c+ other rev@bdb19105162a ancestor rev@924404dff337
launching merge tool: * ../merge *$TESTTMP/t/t/rev* * * (glob)
merge tool returned: 0
0 files updated, 2 files merged, 1 files removed, 0 files unresolved
@@ -538,18 +493,15 @@
getting a
preserving b for resolve of b
preserving rev for resolve of rev
- b: both renamed from a -> m (premerge)
+ b: both renamed from a -> m
picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
merging b
my b@86a2aa42fc76+ other b@8dbce441892a ancestor a@924404dff337
premerge successful
- rev: versions differ -> m (premerge)
+ rev: versions differ -> m
picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
merging rev
my rev@86a2aa42fc76+ other rev@8dbce441892a ancestor rev@924404dff337
- rev: versions differ -> m (merge)
- picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
- my rev@86a2aa42fc76+ other rev@8dbce441892a ancestor rev@924404dff337
launching merge tool: * ../merge *$TESTTMP/t/t/rev* * * (glob)
merge tool returned: 0
1 files updated, 2 files merged, 0 files removed, 0 files unresolved
@@ -576,22 +528,16 @@
starting 4 threads for background file closing (?)
preserving b for resolve of b
preserving rev for resolve of rev
- b: both created -> m (premerge)
+ b: both created -> m
picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
merging b
my b@59318016310c+ other b@bdb19105162a ancestor b@000000000000
- rev: versions differ -> m (premerge)
+ launching merge tool: * ../merge *$TESTTMP/t/t/b* * * (glob)
+ merge tool returned: 0
+ rev: versions differ -> m
picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
merging rev
my rev@59318016310c+ other rev@bdb19105162a ancestor rev@924404dff337
- b: both created -> m (merge)
- picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
- my b@59318016310c+ other b@bdb19105162a ancestor b@000000000000
- launching merge tool: * ../merge *$TESTTMP/t/t/b* * * (glob)
- merge tool returned: 0
- rev: versions differ -> m (merge)
- picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
- my rev@59318016310c+ other rev@bdb19105162a ancestor rev@924404dff337
launching merge tool: * ../merge *$TESTTMP/t/t/rev* * * (glob)
merge tool returned: 0
0 files updated, 2 files merged, 1 files removed, 0 files unresolved
@@ -616,18 +562,15 @@
getting a
preserving b for resolve of b
preserving rev for resolve of rev
- b: both renamed from a -> m (premerge)
+ b: both renamed from a -> m
picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
merging b
my b@86a2aa42fc76+ other b@8dbce441892a ancestor a@924404dff337
premerge successful
- rev: versions differ -> m (premerge)
+ rev: versions differ -> m
picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
merging rev
my rev@86a2aa42fc76+ other rev@8dbce441892a ancestor rev@924404dff337
- rev: versions differ -> m (merge)
- picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
- my rev@86a2aa42fc76+ other rev@8dbce441892a ancestor rev@924404dff337
launching merge tool: * ../merge *$TESTTMP/t/t/rev* * * (glob)
merge tool returned: 0
1 files updated, 2 files merged, 0 files removed, 0 files unresolved
@@ -652,18 +595,15 @@
starting 4 threads for background file closing (?)
preserving b for resolve of b
preserving rev for resolve of rev
- b: both renamed from a -> m (premerge)
+ b: both renamed from a -> m
picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
merging b
my b@0b76e65c8289+ other b@4ce40f5aca24 ancestor a@924404dff337
premerge successful
- rev: versions differ -> m (premerge)
+ rev: versions differ -> m
picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
merging rev
my rev@0b76e65c8289+ other rev@4ce40f5aca24 ancestor rev@924404dff337
- rev: versions differ -> m (merge)
- picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
- my rev@0b76e65c8289+ other rev@4ce40f5aca24 ancestor rev@924404dff337
launching merge tool: * ../merge *$TESTTMP/t/t/rev* * * (glob)
merge tool returned: 0
0 files updated, 2 files merged, 0 files removed, 0 files unresolved
@@ -688,18 +628,15 @@
starting 4 threads for background file closing (?)
preserving b for resolve of b
preserving rev for resolve of rev
- b: both renamed from a -> m (premerge)
+ b: both renamed from a -> m
picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
merging b
my b@02963e448370+ other b@8dbce441892a ancestor a@924404dff337
premerge successful
- rev: versions differ -> m (premerge)
+ rev: versions differ -> m
picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
merging rev
my rev@02963e448370+ other rev@8dbce441892a ancestor rev@924404dff337
- rev: versions differ -> m (merge)
- picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
- my rev@02963e448370+ other rev@8dbce441892a ancestor rev@924404dff337
launching merge tool: * ../merge *$TESTTMP/t/t/rev* * * (glob)
merge tool returned: 0
0 files updated, 2 files merged, 0 files removed, 0 files unresolved
@@ -723,18 +660,15 @@
starting 4 threads for background file closing (?)
preserving b for resolve of b
preserving rev for resolve of rev
- b: both renamed from a -> m (premerge)
+ b: both renamed from a -> m
picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
merging b
my b@0b76e65c8289+ other b@bdb19105162a ancestor a@924404dff337
premerge successful
- rev: versions differ -> m (premerge)
+ rev: versions differ -> m
picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
merging rev
my rev@0b76e65c8289+ other rev@bdb19105162a ancestor rev@924404dff337
- rev: versions differ -> m (merge)
- picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
- my rev@0b76e65c8289+ other rev@bdb19105162a ancestor rev@924404dff337
launching merge tool: * ../merge *$TESTTMP/t/t/rev* * * (glob)
merge tool returned: 0
0 files updated, 2 files merged, 0 files removed, 0 files unresolved
@@ -762,22 +696,16 @@
preserving a for resolve of b
preserving rev for resolve of rev
removing a
- b: remote moved from a -> m (premerge)
+ b: remote moved from a -> m
picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
merging a and b to b
my b@e300d1c794ec+ other b@49b6d8032493 ancestor a@924404dff337
- rev: versions differ -> m (premerge)
+ launching merge tool: * ../merge *$TESTTMP/t/t/b* * * (glob)
+ merge tool returned: 0
+ rev: versions differ -> m
picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
merging rev
my rev@e300d1c794ec+ other rev@49b6d8032493 ancestor rev@924404dff337
- b: remote moved from a -> m (merge)
- picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
- my b@e300d1c794ec+ other b@49b6d8032493 ancestor a@924404dff337
- launching merge tool: * ../merge *$TESTTMP/t/t/b* * * (glob)
- merge tool returned: 0
- rev: versions differ -> m (merge)
- picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
- my rev@e300d1c794ec+ other rev@49b6d8032493 ancestor rev@924404dff337
launching merge tool: * ../merge *$TESTTMP/t/t/rev* * * (glob)
merge tool returned: 0
0 files updated, 2 files merged, 0 files removed, 0 files unresolved
@@ -804,22 +732,16 @@
starting 4 threads for background file closing (?)
preserving b for resolve of b
preserving rev for resolve of rev
- b: local copied/moved from a -> m (premerge)
+ b: local copied/moved from a -> m
picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
merging b and a to b
my b@62e7bf090eba+ other a@f4db7e329e71 ancestor a@924404dff337
- rev: versions differ -> m (premerge)
+ launching merge tool: * ../merge *$TESTTMP/t/t/b* * * (glob)
+ merge tool returned: 0
+ rev: versions differ -> m
picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
merging rev
my rev@62e7bf090eba+ other rev@f4db7e329e71 ancestor rev@924404dff337
- b: local copied/moved from a -> m (merge)
- picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
- my b@62e7bf090eba+ other a@f4db7e329e71 ancestor a@924404dff337
- launching merge tool: * ../merge *$TESTTMP/t/t/b* * * (glob)
- merge tool returned: 0
- rev: versions differ -> m (merge)
- picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
- my rev@62e7bf090eba+ other rev@f4db7e329e71 ancestor rev@924404dff337
launching merge tool: * ../merge *$TESTTMP/t/t/rev* * * (glob)
merge tool returned: 0
0 files updated, 2 files merged, 0 files removed, 0 files unresolved
@@ -852,18 +774,15 @@
getting c
preserving b for resolve of b
preserving rev for resolve of rev
- b: local copied/moved from a -> m (premerge)
+ b: local copied/moved from a -> m
picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
merging b and a to b
my b@02963e448370+ other a@2b958612230f ancestor a@924404dff337
premerge successful
- rev: versions differ -> m (premerge)
+ rev: versions differ -> m
picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
merging rev
my rev@02963e448370+ other rev@2b958612230f ancestor rev@924404dff337
- rev: versions differ -> m (merge)
- picked tool '* ../merge' for rev (binary False symlink False changedelete False) (glob)
- my rev@02963e448370+ other rev@2b958612230f ancestor rev@924404dff337
launching merge tool: * ../merge *$TESTTMP/t/t/rev* * * (glob)
merge tool returned: 0
1 files updated, 2 files merged, 0 files removed, 0 files unresolved
--- a/tests/test-rename.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-rename.t Fri Feb 18 14:27:43 2022 +0100
@@ -610,7 +610,7 @@
$ hg rename d1/d11/a1 .hg/foo
abort: path contains illegal component: .hg/foo
- [255]
+ [10]
$ hg status -C
$ hg rename d1/d11/a1 ../foo
abort: ../foo not under root '$TESTTMP'
@@ -620,7 +620,7 @@
$ mv d1/d11/a1 .hg/foo
$ hg rename --after d1/d11/a1 .hg/foo
abort: path contains illegal component: .hg/foo
- [255]
+ [10]
$ hg status -C
! d1/d11/a1
$ hg update -C
@@ -629,11 +629,11 @@
$ hg rename d1/d11/a1 .hg
abort: path contains illegal component: .hg/a1
- [255]
+ [10]
$ hg --config extensions.largefiles= rename d1/d11/a1 .hg
The fsmonitor extension is incompatible with the largefiles extension and has been disabled. (fsmonitor !)
abort: path contains illegal component: .hg/a1
- [255]
+ [10]
$ hg status -C
$ hg rename d1/d11/a1 ..
abort: ../a1 not under root '$TESTTMP'
@@ -647,7 +647,7 @@
$ mv d1/d11/a1 .hg
$ hg rename --after d1/d11/a1 .hg
abort: path contains illegal component: .hg/a1
- [255]
+ [10]
$ hg status -C
! d1/d11/a1
$ hg update -C
@@ -656,7 +656,7 @@
$ (cd d1/d11; hg rename ../../d2/b ../../.hg/foo)
abort: path contains illegal component: .hg/foo
- [255]
+ [10]
$ hg status -C
$ (cd d1/d11; hg rename ../../d2/b ../../../foo)
abort: ../../../foo not under root '$TESTTMP'
--- a/tests/test-repo-compengines.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-repo-compengines.t Fri Feb 18 14:27:43 2022 +0100
@@ -9,16 +9,8 @@
$ hg init default
$ cd default
- $ cat .hg/requires
- dotencode
- dirstate-v2 (dirstate-v2 !)
- fncache
- generaldelta
- persistent-nodemap (rust !)
- revlogv1
- sparserevlog
- store
- testonly-simplestore (reposimplestore !)
+ $ hg debugrequires | grep compression
+ [1]
$ touch foo
$ hg -q commit -A -m 'initial commit with a lot of repeated repeated repeated text to trigger compression'
@@ -59,16 +51,8 @@
$ touch bar
$ hg --config format.revlog-compression=none -q commit -A -m 'add bar with a lot of repeated repeated repeated text'
- $ cat .hg/requires
- dotencode
- dirstate-v2 (dirstate-v2 !)
- fncache
- generaldelta
- persistent-nodemap (rust !)
- revlogv1
- sparserevlog
- store
- testonly-simplestore (reposimplestore !)
+ $ hg debugrequires | grep compression
+ [1]
$ hg debugrevlog -c | grep 0x78
0x78 (x) : 2 (100.00%)
@@ -79,17 +63,8 @@
$ hg --config format.revlog-compression=zstd init zstd
$ cd zstd
- $ cat .hg/requires
- dotencode
- dirstate-v2 (dirstate-v2 !)
- fncache
- generaldelta
- persistent-nodemap (rust !)
+ $ hg debugrequires | grep compression
revlog-compression-zstd
- revlogv1
- sparserevlog
- store
- testonly-simplestore (reposimplestore !)
$ touch foo
$ hg -q commit -A -m 'initial commit with a lot of repeated repeated repeated text'
@@ -183,17 +158,8 @@
summary: some-commit
- $ cat none-compression/.hg/requires
- dotencode
+ $ hg debugrequires -R none-compression/ | grep compression
exp-compression-none
- dirstate-v2 (dirstate-v2 !)
- fncache
- generaldelta
- persistent-nodemap (rust !)
- revlogv1
- sparserevlog
- store
- testonly-simplestore (reposimplestore !)
$ $RUNTESTDIR/f -s none-compression/.hg/store/data/*
none-compression/.hg/store/data/a.i: size=4216
--- a/tests/test-requires.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-requires.t Fri Feb 18 14:27:43 2022 +0100
@@ -57,6 +57,7 @@
persistent-nodemap (rust !)
revlog-compression-zstd (zstd !)
revlogv1
+ share-safe
sparserevlog
store
$ hg -R supported status
--- a/tests/test-resolve.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-resolve.t Fri Feb 18 14:27:43 2022 +0100
@@ -196,8 +196,8 @@
resolve --all should re-merge all unresolved files
$ hg resolve --all
merging file1
+ warning: conflicts while merging file1! (edit, then use 'hg resolve --mark')
merging file2
- warning: conflicts while merging file1! (edit, then use 'hg resolve --mark')
warning: conflicts while merging file2! (edit, then use 'hg resolve --mark')
[1]
$ cat file1.orig
@@ -211,8 +211,8 @@
$ hg resolve --all --verbose --config 'ui.origbackuppath=.hg/origbackups'
merging file1
creating directory: $TESTTMP/repo/.hg/origbackups
+ warning: conflicts while merging file1! (edit, then use 'hg resolve --mark')
merging file2
- warning: conflicts while merging file1! (edit, then use 'hg resolve --mark')
warning: conflicts while merging file2! (edit, then use 'hg resolve --mark')
[1]
$ ls .hg/origbackups
@@ -478,10 +478,10 @@
$ hg rebase -s 1 -d 2
rebasing 1:f30f98a8181f "added emp1 emp2 emp3"
merging emp1
+ warning: conflicts while merging emp1! (edit, then use 'hg resolve --mark')
merging emp2
+ warning: conflicts while merging emp2! (edit, then use 'hg resolve --mark')
merging emp3
- warning: conflicts while merging emp1! (edit, then use 'hg resolve --mark')
- warning: conflicts while merging emp2! (edit, then use 'hg resolve --mark')
warning: conflicts while merging emp3! (edit, then use 'hg resolve --mark')
unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[240]
@@ -490,10 +490,10 @@
===========================================================
$ hg resolve --all
merging emp1
+ warning: conflicts while merging emp1! (edit, then use 'hg resolve --mark')
merging emp2
+ warning: conflicts while merging emp2! (edit, then use 'hg resolve --mark')
merging emp3
- warning: conflicts while merging emp1! (edit, then use 'hg resolve --mark')
- warning: conflicts while merging emp2! (edit, then use 'hg resolve --mark')
warning: conflicts while merging emp3! (edit, then use 'hg resolve --mark')
[1]
@@ -522,10 +522,10 @@
> EOF
re-merge all unresolved files (yn)? y
merging emp1
+ warning: conflicts while merging emp1! (edit, then use 'hg resolve --mark')
merging emp2
+ warning: conflicts while merging emp2! (edit, then use 'hg resolve --mark')
merging emp3
- warning: conflicts while merging emp1! (edit, then use 'hg resolve --mark')
- warning: conflicts while merging emp2! (edit, then use 'hg resolve --mark')
warning: conflicts while merging emp3! (edit, then use 'hg resolve --mark')
[1]
--- a/tests/test-revert.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-revert.t Fri Feb 18 14:27:43 2022 +0100
@@ -320,7 +320,7 @@
$ hg mv --force a b/b
$ hg revert b/b
- $ hg status a b/b
+ $ hg status a b/b --copies
$ cd ..
--- a/tests/test-revlog-raw.py Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-revlog-raw.py Fri Feb 18 14:27:43 2022 +0100
@@ -325,7 +325,7 @@
rawtext = text
if rlog.rawsize(rev) != len(rawtext):
abort('rev %d: wrong rawsize' % rev)
- if rlog.revision(rev, raw=False) != text:
+ if rlog.revision(rev) != text:
abort('rev %d: wrong text' % rev)
if rlog.rawdata(rev) != rawtext:
abort('rev %d: wrong rawtext' % rev)
--- a/tests/test-revlog-v2.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-revlog-v2.t Fri Feb 18 14:27:43 2022 +0100
@@ -20,16 +20,9 @@
$ hg init new-repo
$ cd new-repo
- $ cat .hg/requires
- dotencode
- dirstate-v2 (dirstate-v2 !)
+ $ hg debugrequires | grep revlogv2
exp-revlogv2.2
- fncache
- generaldelta
- persistent-nodemap (rust !)
- revlog-compression-zstd (zstd !)
- sparserevlog
- store
+ dirstate-v2 (dirstate-v2 !)
$ hg log
--- a/tests/test-revset.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-revset.t Fri Feb 18 14:27:43 2022 +0100
@@ -306,7 +306,7 @@
(negate
(symbol 'a')))
abort: unknown revision '-a'
- [255]
+ [10]
$ try é
(symbol '\xc3\xa9')
* set:
--- a/tests/test-revset2.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-revset2.t Fri Feb 18 14:27:43 2022 +0100
@@ -870,7 +870,7 @@
$ try m
(symbol 'm')
abort: unknown revision 'm'
- [255]
+ [10]
$ HGPLAINEXCEPT=revsetalias
$ export HGPLAINEXCEPT
@@ -1061,7 +1061,7 @@
(symbol 'max')
(string '$1')))
abort: unknown revision '$1'
- [255]
+ [10]
test scope of alias expansion: 'universe' is expanded prior to 'shadowall(0)',
but 'all()' should never be substituted to '0()'.
@@ -1601,7 +1601,7 @@
> EOF
$ hg debugrevspec "custom1()"
- *** failed to import extension custompredicate from $TESTTMP/custompredicate.py: intentional failure of loading extension
+ *** failed to import extension "custompredicate" from $TESTTMP/custompredicate.py: intentional failure of loading extension
hg: parse error: unknown identifier: custom1
[10]
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-rhg-no-generaldelta.t Fri Feb 18 14:27:43 2022 +0100
@@ -0,0 +1,46 @@
+ $ NO_FALLBACK="env RHG_ON_UNSUPPORTED=abort"
+
+ $ cat << EOF >> $HGRCPATH
+ > [format]
+ > sparse-revlog = no
+ > EOF
+
+ $ hg init repo --config format.generaldelta=no --config format.usegeneraldelta=no
+ $ cd repo
+ $ (echo header; seq.py 20) > f
+ $ hg commit -q -Am initial
+ $ (echo header; seq.py 20; echo footer) > f
+ $ hg commit -q -Am x
+ $ hg update ".^"
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ (seq.py 20; echo footer) > f
+ $ hg commit -q -Am y
+ $ hg debugdeltachain f --template '{rev} {prevrev} {deltatype}\n'
+ 0 -1 base
+ 1 0 prev
+ 2 1 prev
+
+rhg works on non-generaldelta revlogs:
+
+ $ $NO_FALLBACK hg cat f -r .
+ 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+ 10
+ 11
+ 12
+ 13
+ 14
+ 15
+ 16
+ 17
+ 18
+ 19
+ 20
+ footer
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-rhg-sparse-narrow.t Fri Feb 18 14:27:43 2022 +0100
@@ -0,0 +1,120 @@
+#require rhg
+
+ $ NO_FALLBACK="env RHG_ON_UNSUPPORTED=abort"
+
+Rhg works well when sparse working copy is enabled.
+
+ $ cd "$TESTTMP"
+ $ hg init repo-sparse
+ $ cd repo-sparse
+ $ cat > .hg/hgrc <<EOF
+ > [extensions]
+ > sparse=
+ > EOF
+
+ $ echo a > show
+ $ echo x > hide
+ $ mkdir dir1 dir2
+ $ echo x > dir1/x
+ $ echo y > dir1/y
+ $ echo z > dir2/z
+
+ $ hg ci -Aqm 'initial'
+ $ hg debugsparse --include 'show'
+ $ ls -A
+ .hg
+ show
+
+ $ tip=$(hg log -r . --template '{node}')
+ $ $NO_FALLBACK rhg files -r "$tip"
+ dir1/x
+ dir1/y
+ dir2/z
+ hide
+ show
+ $ $NO_FALLBACK rhg files
+ show
+
+ $ $NO_FALLBACK rhg cat -r "$tip" hide
+ x
+
+ $ cd ..
+
+We support most things when narrow is enabled, too, with a couple of caveats.
+
+ $ . "$TESTDIR/narrow-library.sh"
+ $ real_hg=$RHG_FALLBACK_EXECUTABLE
+
+ $ cat >> $HGRCPATH <<EOF
+ > [extensions]
+ > narrow=
+ > EOF
+
+ $ hg clone --narrow ./repo-sparse repo-narrow --include dir1
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 2 changes to 2 files
+ new changesets 6d714a4a2998
+ updating to branch default
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+ $ cd repo-narrow
+
+ $ $NO_FALLBACK rhg cat -r "$tip" dir1/x
+ x
+ $ "$real_hg" cat -r "$tip" dir1/x
+ x
+
+TODO: bad error message
+
+ $ $NO_FALLBACK rhg cat -r "$tip" hide
+ abort: invalid revision identifier: 6d714a4a2998cbfd0620db44da58b749f6565d63
+ [255]
+ $ "$real_hg" cat -r "$tip" hide
+ [1]
+
+A naive implementation of [rhg files] leaks the paths that are supposed to be
+hidden by narrow, so we just fall back to hg.
+
+ $ $NO_FALLBACK rhg files -r "$tip"
+ unsupported feature: rhg files -r <rev> is not supported in narrow clones
+ [252]
+ $ "$real_hg" files -r "$tip"
+ dir1/x
+ dir1/y
+
+Hg status needs to do some filtering based on narrow spec, so we don't
+support it in rhg for narrow clones yet.
+
+ $ mkdir dir2
+ $ touch dir2/q
+ $ "$real_hg" status
+ $ $NO_FALLBACK rhg --config rhg.status=true status
+ unsupported feature: rhg status is not supported for sparse checkouts or narrow clones yet
+ [252]
+
+Adding "orphaned" index files:
+
+ $ (cd ..; cp repo-sparse/.hg/store/data/hide.i repo-narrow/.hg/store/data/hide.i)
+ $ (cd ..; mkdir repo-narrow/.hg/store/data/dir2; cp repo-sparse/.hg/store/data/dir2/z.i repo-narrow/.hg/store/data/dir2/z.i)
+ $ "$real_hg" verify
+ checking changesets
+ checking manifests
+ crosschecking files in changesets and manifests
+ checking files
+ checked 1 changesets with 2 changes to 2 files
+
+ $ "$real_hg" files -r "$tip"
+ dir1/x
+ dir1/y
+
+# TODO: even though [hg files] hides the orphaned dir2/z, [hg cat] still shows it.
+# rhg has the same issue, but at least it's not specific to rhg.
+# This is despite [hg verify] succeeding above.
+
+ $ $NO_FALLBACK rhg cat -r "$tip" dir2/z
+ z
+ $ "$real_hg" cat -r "$tip" dir2/z
+ z
--- a/tests/test-rhg.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-rhg.t Fri Feb 18 14:27:43 2022 +0100
@@ -168,13 +168,12 @@
$ rhg cat original --exclude="*.rs"
original content
- $ FALLBACK_EXE="$RHG_FALLBACK_EXECUTABLE"
- $ unset RHG_FALLBACK_EXECUTABLE
- $ rhg cat original --exclude="*.rs"
+ $ (unset RHG_FALLBACK_EXECUTABLE; rhg cat original --exclude="*.rs")
abort: 'rhg.on-unsupported=fallback' without 'rhg.fallback-executable' set.
[255]
- $ RHG_FALLBACK_EXECUTABLE="$FALLBACK_EXE"
- $ export RHG_FALLBACK_EXECUTABLE
+
+ $ (unset RHG_FALLBACK_EXECUTABLE; rhg cat original)
+ original content
$ rhg cat original --exclude="*.rs" --config rhg.fallback-executable=false
[1]
@@ -244,6 +243,7 @@
persistent-nodemap
revlog-compression-zstd (zstd !)
revlogv1
+ share-safe
sparserevlog
store
@@ -368,9 +368,9 @@
$ echo "maxsize = 1" >> $HGRCPATH
$ $NO_FALLBACK rhg files > /dev/null
$ cat .hg/blackbox.log
- ????/??/?? ??:??:??.??? * @d3873e73d99ef67873dac33fbcc66268d5d2b6f4 (*)> (rust) files exited 0 after 0.??? seconds (glob)
+ ????-??-?? ??:??:??.??? * @d3873e73d99ef67873dac33fbcc66268d5d2b6f4 (*)> (rust) files exited 0 after 0.??? seconds (glob)
$ cat .hg/blackbox.log.1
- ????/??/?? ??:??:??.??? * @d3873e73d99ef67873dac33fbcc66268d5d2b6f4 (*)> (rust) files (glob)
+ ????-??-?? ??:??:??.??? * @d3873e73d99ef67873dac33fbcc66268d5d2b6f4 (*)> (rust) files (glob)
Subrepos are not supported
@@ -381,3 +381,13 @@
$ rhg files
a
$ rm .hgsub
+
+The `:required` extension suboptions are correctly ignored
+
+ $ echo "[extensions]" >> $HGRCPATH
+ $ echo "blackbox:required = yes" >> $HGRCPATH
+ $ rhg files
+ a
+ $ echo "*:required = yes" >> $HGRCPATH
+ $ rhg files
+ a
--- a/tests/test-run-tests.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-run-tests.t Fri Feb 18 14:27:43 2022 +0100
@@ -21,7 +21,7 @@
error paths
#if symlink
- $ ln -s `which true` hg
+ $ ln -s "/"bin"/"true hg
$ "$PYTHON" $TESTDIR/run-tests.py --with-hg=./hg
^warning: --with-hg should specify an hg script, not: (true|coreutils)$ (re)
running 0 tests using 0 parallel processes
@@ -176,14 +176,19 @@
running 1 tests using 1 parallel processes
\x1b[38;5;124m--- $TESTTMP/test-failure.t\x1b[39m (esc)
- \x1b[38;5;34m+++ $TESTTMP/test-failure.t.err\x1b[39m (esc)
+ \x1b[38;5;28m+++ $TESTTMP/test-failure.t.err\x1b[39m (esc) (pygments211 !)
+ \x1b[38;5;34m+++ $TESTTMP/test-failure.t.err\x1b[39m (esc) (no-pygments211 !)
\x1b[38;5;90;01m@@ -1,4 +1,4 @@\x1b[39;00m (esc)
- $ echo "bar-baz"; echo "bar-bad"; echo foo
- \x1b[38;5;34m+ bar*baz (glob)\x1b[39m (esc)
- bar*bad (glob)
+ \x1b[38;5;250m \x1b[39m $ echo "bar-baz"; echo "bar-bad"; echo foo (esc) (pygments211 !)
+ $ echo "bar-baz"; echo "bar-bad"; echo foo (no-pygments211 !)
+ \x1b[38;5;28m+ bar*baz (glob)\x1b[39m (esc) (pygments211 !)
+ \x1b[38;5;34m+ bar*baz (glob)\x1b[39m (esc) (no-pygments211 !)
+ \x1b[38;5;250m \x1b[39m bar*bad (glob) (esc) (pygments211 !)
+ bar*bad (glob) (no-pygments211 !)
\x1b[38;5;124m- bar*baz (glob)\x1b[39m (esc)
\x1b[38;5;124m- | fo (re)\x1b[39m (esc)
- \x1b[38;5;34m+ foo\x1b[39m (esc)
+ \x1b[38;5;28m+ foo\x1b[39m (esc) (pygments211 !)
+ \x1b[38;5;34m+ foo\x1b[39m (esc) (no-pygments211 !)
\x1b[38;5;88mERROR: \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m output changed\x1b[39m (esc)
!
--- a/tests/test-share-safe.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-share-safe.t Fri Feb 18 14:27:43 2022 +0100
@@ -244,7 +244,8 @@
$ echo "use-persistent-nodemap=True" >> .hg/hgrc
$ hg debugupgraderepo --run -q -R ../shared1
- abort: cannot upgrade repository; unsupported source requirement: shared
+ abort: cannot use these actions on a share repository: persistent-nodemap
+ (upgrade the main repository directly)
[255]
$ hg debugupgraderepo --run -q
@@ -363,10 +364,7 @@
preserved: dotencode, use-dirstate-v2, fncache, generaldelta, revlogv1, sparserevlog, store (dirstate-v2 !)
added: share-safe
- processed revlogs:
- - all-filelogs
- - changelog
- - manifest
+ no revlogs to process
$ hg debugupgraderepo --run
upgrade will perform the following actions:
@@ -379,10 +377,7 @@
share-safe
Upgrades a repository to share-safe format so that future shares of this repository share its requirements and configs.
- processed revlogs:
- - all-filelogs
- - changelog
- - manifest
+ no revlogs to process
beginning upgrade...
repository locked and read-only
@@ -457,10 +452,7 @@
preserved: dotencode, use-dirstate-v2, fncache, generaldelta, revlogv1, sparserevlog, store (dirstate-v2 !)
removed: share-safe
- processed revlogs:
- - all-filelogs
- - changelog
- - manifest
+ no revlogs to process
$ hg debugupgraderepo --run
upgrade will perform the following actions:
@@ -470,10 +462,7 @@
preserved: dotencode, use-dirstate-v2, fncache, generaldelta, revlogv1, sparserevlog, store (dirstate-v2 !)
removed: share-safe
- processed revlogs:
- - all-filelogs
- - changelog
- - manifest
+ no revlogs to process
beginning upgrade...
repository locked and read-only
@@ -556,10 +545,7 @@
preserved: dotencode, use-dirstate-v2, fncache, generaldelta, revlogv1, sparserevlog, store (dirstate-v2 !)
added: share-safe
- processed revlogs:
- - all-filelogs
- - changelog
- - manifest
+ no revlogs to process
repository upgraded to share safe mode, existing shares will still work in old non-safe mode. Re-share existing shares to use them in safe mode New shares will be created in safe mode.
$ hg debugrequirements
--- a/tests/test-share.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-share.t Fri Feb 18 14:27:43 2022 +0100
@@ -161,7 +161,7 @@
$ cd ..
$ hg clone -q --stream ssh://user@dummy/`pwd`/repo2 cloned-via-bundle2
- $ cat ./cloned-via-bundle2/.hg/requires | grep "shared"
+ $ hg -R cloned-via-bundle2 debugrequires | grep "shared"
[1]
$ hg id --cwd cloned-via-bundle2 -r tip
c2e0ac586386 tip
@@ -173,7 +173,7 @@
$ test -d .hg/store
$ test -f .hg/sharedpath
[1]
- $ grep shared .hg/requires
+ $ hg debugrequires | grep shared
[1]
$ hg unshare
abort: this is not a shared repo
@@ -208,10 +208,11 @@
$ hg share -U --relative thisdir/abs thisdir/rel
$ cat thisdir/rel/.hg/sharedpath
../../orig/.hg (no-eol)
- $ grep shared thisdir/*/.hg/requires
- thisdir/abs/.hg/requires:shared
- thisdir/rel/.hg/requires:relshared
- thisdir/rel/.hg/requires:shared
+ $ hg debugrequires -R thisdir/abs/ | grep shared
+ shared
+ $ hg debugrequires -R thisdir/rel/ | grep shared
+ relshared
+ shared
test that relative shared paths aren't relative to $PWD
@@ -241,7 +242,7 @@
$ test -d .hg/store
$ test -f .hg/sharedpath
[1]
- $ grep shared .hg/requires
+ $ hg debugrequires | grep shared
[1]
$ hg unshare
abort: this is not a shared repo
@@ -284,3 +285,25 @@
$ hg share nostore sharednostore
abort: cannot create shared repository as source was created with 'format.usestore' config disabled
[255]
+
+Check that (safe) share can control wc-specific format variant at creation time
+-------------------------------------------------------------------------------
+
+#if no-rust
+
+ $ cat << EOF >> $HGRCPATH
+ > [storage]
+ > dirstate-v2.slow-path = allow
+ > EOF
+
+#endif
+
+ $ hg init repo-safe-d1 --config format.use-share-safe=yes --config format.exp-rc-dirstate-v2=no
+ $ hg debugformat -R repo-safe-d1 | grep dirstate-v2
+ dirstate-v2: no
+
+ $ hg share repo-safe-d1 share-safe-d2 --config format.use-share-safe=yes --config format.exp-rc-dirstate-v2=yes
+ updating working directory
+ 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg debugformat -R share-safe-d2 | grep dirstate-v2
+ dirstate-v2: yes
--- a/tests/test-shelve.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-shelve.t Fri Feb 18 14:27:43 2022 +0100
@@ -419,11 +419,11 @@
+++ b/a/a
@@ -1,2 +1,6 @@
a
- +<<<<<<< working-copy: 2377350b6337 - shelve: pending changes temporary commit
+ +<<<<<<< working-copy: 2377350b6337 - shelve: pending changes temporary commit
c
+=======
+a
- +>>>>>>> shelve: 203c9f771d2b - shelve: changes to: [mq]: second.patch
+ +>>>>>>> shelved change: 203c9f771d2b - shelve: changes to: [mq]: second.patch
diff --git a/b/b b/b.rename/b
rename from b/b
rename to b.rename/b
@@ -1385,8 +1385,8 @@
unshelving change 'default-01'
rebasing shelved changes
merging bar1
+ warning: conflicts while merging bar1! (edit, then use 'hg resolve --mark')
merging bar2
- warning: conflicts while merging bar1! (edit, then use 'hg resolve --mark')
warning: conflicts while merging bar2! (edit, then use 'hg resolve --mark')
unresolved conflicts (see 'hg resolve', then 'hg unshelve --continue')
[240]
--- a/tests/test-shelve2.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-shelve2.t Fri Feb 18 14:27:43 2022 +0100
@@ -253,11 +253,11 @@
M f
? f.orig
$ cat f
- <<<<<<< working-copy: d44eae5c3d33 - shelve: pending changes temporary commit
+ <<<<<<< working-copy: d44eae5c3d33 - shelve: pending changes temporary commit
g
=======
f
- >>>>>>> shelve: aef214a5229c - shelve: changes to: commit stuff
+ >>>>>>> shelved change: aef214a5229c - shelve: changes to: commit stuff
$ cat f.orig
g
$ hg unshelve --abort -t false
@@ -295,11 +295,11 @@
M f
? f.orig
$ cat f
- <<<<<<< working-copy: 6b563750f973 - test: intermediate other change
+ <<<<<<< working-copy: 6b563750f973 - test: intermediate other change
g
=======
f
- >>>>>>> shelve: aef214a5229c - shelve: changes to: commit stuff
+ >>>>>>> shelved change: aef214a5229c - shelve: changes to: commit stuff
$ cat f.orig
g
@@ -986,9 +986,9 @@
[240]
$ cat foo
r0
- <<<<<<< working-copy: 0b2fcf2a90e9 - shelve: pending changes temporary commit
+ <<<<<<< working-copy: 0b2fcf2a90e9 - shelve: pending changes temporary commit
this is in wdir, conflicts with shelve
=======
this will be shelved
- >>>>>>> shelve: 9c072a2163db - shelve: changes to: r0
+ >>>>>>> shelved change: 9c072a2163db - shelve: changes to: r0
$ cd ..
--- a/tests/test-sidedata.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-sidedata.t Fri Feb 18 14:27:43 2022 +0100
@@ -49,78 +49,22 @@
-------------------------------------
$ hg init up-no-side-data --config experimental.revlogv2=no
- $ hg debugformat -v -R up-no-side-data
- format-variant repo config default
- fncache: yes yes yes
- dirstate-v2: no no no
- dotencode: yes yes yes
- generaldelta: yes yes yes
- share-safe: no no no
- sparserevlog: yes yes yes
- persistent-nodemap: no no no (no-rust !)
- persistent-nodemap: yes yes no (rust !)
- copies-sdc: no no no
+ $ hg debugformat -v -R up-no-side-data | egrep 'changelog-v2|revlog-v2'
revlog-v2: no no no
changelog-v2: no no no
- plain-cl-delta: yes yes yes
- compression: zlib zlib zlib (no-zstd !)
- compression: zstd zstd zstd (zstd !)
- compression-level: default default default
- $ hg debugformat -v -R up-no-side-data --config experimental.revlogv2=enable-unstable-format-and-corrupt-my-data
- format-variant repo config default
- fncache: yes yes yes
- dirstate-v2: no no no
- dotencode: yes yes yes
- generaldelta: yes yes yes
- share-safe: no no no
- sparserevlog: yes yes yes
- persistent-nodemap: no no no (no-rust !)
- persistent-nodemap: yes yes no (rust !)
- copies-sdc: no no no
+ $ hg debugformat -v -R up-no-side-data --config experimental.revlogv2=enable-unstable-format-and-corrupt-my-data | egrep 'changelog-v2|revlog-v2'
revlog-v2: no yes no
changelog-v2: no no no
- plain-cl-delta: yes yes yes
- compression: zlib zlib zlib (no-zstd !)
- compression: zstd zstd zstd (zstd !)
- compression-level: default default default
$ hg debugupgraderepo -R up-no-side-data --config experimental.revlogv2=enable-unstable-format-and-corrupt-my-data > /dev/null
Check that we can downgrade from sidedata
-----------------------------------------
$ hg init up-side-data --config experimental.revlogv2=enable-unstable-format-and-corrupt-my-data
- $ hg debugformat -v -R up-side-data
- format-variant repo config default
- fncache: yes yes yes
- dirstate-v2: no no no
- dotencode: yes yes yes
- generaldelta: yes yes yes
- share-safe: no no no
- sparserevlog: yes yes yes
- persistent-nodemap: no no no (no-rust !)
- persistent-nodemap: yes yes no (rust !)
- copies-sdc: no no no
+ $ hg debugformat -v -R up-side-data | egrep 'changelog-v2|revlog-v2'
revlog-v2: yes no no
changelog-v2: no no no
- plain-cl-delta: yes yes yes
- compression: zlib zlib zlib (no-zstd !)
- compression: zstd zstd zstd (zstd !)
- compression-level: default default default
- $ hg debugformat -v -R up-side-data --config experimental.revlogv2=no
- format-variant repo config default
- fncache: yes yes yes
- dirstate-v2: no no no
- dotencode: yes yes yes
- generaldelta: yes yes yes
- share-safe: no no no
- sparserevlog: yes yes yes
- persistent-nodemap: no no no (no-rust !)
- persistent-nodemap: yes yes no (rust !)
- copies-sdc: no no no
+ $ hg debugformat -v -R up-side-data --config experimental.revlogv2=no | egrep 'changelog-v2|revlog-v2'
revlog-v2: yes no no
changelog-v2: no no no
- plain-cl-delta: yes yes yes
- compression: zlib zlib zlib (no-zstd !)
- compression: zstd zstd zstd (zstd !)
- compression-level: default default default
$ hg debugupgraderepo -R up-side-data --config experimental.revlogv2=no > /dev/null
--- a/tests/test-simplemerge.py Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-simplemerge.py Fri Feb 18 14:27:43 2022 +0100
@@ -48,9 +48,6 @@
)
-CantReprocessAndShowBase = simplemerge.CantReprocessAndShowBase
-
-
def split_lines(t):
return util.stringio(t).readlines()
@@ -179,7 +176,9 @@
self.assertEqual(list(m3.merge_regions()), [(b'a', 0, 2)])
- self.assertEqual(list(m3.merge_lines()), [b'aaa', b'bbb'])
+ self.assertEqual(
+ simplemerge.render_minimized(m3), ([b'aaa', b'bbb'], False)
+ )
def test_no_conflicts(self):
"""No conflicts because only one side changed"""
@@ -204,7 +203,9 @@
[b'aaa\n', b'bbb\n'],
)
- self.assertEqual(b''.join(m3.merge_lines()), b'aaa\nbbb\n222\n')
+ self.assertEqual(
+ b''.join(simplemerge.render_minimized(m3)[0]), b'aaa\nbbb\n222\n'
+ )
def test_append_b(self):
m3 = Merge3(
@@ -213,7 +214,9 @@
[b'aaa\n', b'bbb\n', b'222\n'],
)
- self.assertEqual(b''.join(m3.merge_lines()), b'aaa\nbbb\n222\n')
+ self.assertEqual(
+ b''.join(simplemerge.render_minimized(m3)[0]), b'aaa\nbbb\n222\n'
+ )
def test_append_agreement(self):
m3 = Merge3(
@@ -222,7 +225,9 @@
[b'aaa\n', b'bbb\n', b'222\n'],
)
- self.assertEqual(b''.join(m3.merge_lines()), b'aaa\nbbb\n222\n')
+ self.assertEqual(
+ b''.join(simplemerge.render_minimized(m3)[0]), b'aaa\nbbb\n222\n'
+ )
def test_append_clash(self):
m3 = Merge3(
@@ -231,7 +236,8 @@
[b'aaa\n', b'bbb\n', b'333\n'],
)
- ml = m3.merge_lines(
+ ml, conflicts = simplemerge.render_minimized(
+ m3,
name_a=b'a',
name_b=b'b',
start_marker=b'<<',
@@ -250,7 +256,8 @@
[b'aaa\n', b'222\n', b'bbb\n'],
)
- ml = m3.merge_lines(
+ ml, conflicts = simplemerge.render_minimized(
+ m3,
name_a=b'a',
name_b=b'b',
start_marker=b'<<',
@@ -285,12 +292,13 @@
list(m3.merge_groups()),
[
(b'unchanged', [b'aaa\n']),
- (b'conflict', [], [b'111\n'], [b'222\n']),
+ (b'conflict', ([], [b'111\n'], [b'222\n'])),
(b'unchanged', [b'bbb\n']),
],
)
- ml = m3.merge_lines(
+ ml, conflicts = simplemerge.render_minimized(
+ m3,
name_a=b'a',
name_b=b'b',
start_marker=b'<<',
@@ -338,7 +346,7 @@
def test_merge_poem(self):
"""Test case from diff3 manual"""
m3 = Merge3(TZU, LAO, TAO)
- ml = list(m3.merge_lines(b'LAO', b'TAO'))
+ ml, conflicts = simplemerge.render_minimized(m3, b'LAO', b'TAO')
self.log(b'merge result:')
self.log(b''.join(ml))
self.assertEqual(ml, MERGED_RESULT)
@@ -356,11 +364,11 @@
other_text.splitlines(True),
this_text.splitlines(True),
)
- m_lines = m3.merge_lines(b'OTHER', b'THIS')
+ m_lines, conflicts = simplemerge.render_minimized(m3, b'OTHER', b'THIS')
self.assertEqual(
b'<<<<<<< OTHER\r\nc\r\n=======\r\nb\r\n'
b'>>>>>>> THIS\r\n'.splitlines(True),
- list(m_lines),
+ m_lines,
)
def test_mac_text(self):
@@ -372,11 +380,11 @@
other_text.splitlines(True),
this_text.splitlines(True),
)
- m_lines = m3.merge_lines(b'OTHER', b'THIS')
+ m_lines, conflicts = simplemerge.render_minimized(m3, b'OTHER', b'THIS')
self.assertEqual(
b'<<<<<<< OTHER\rc\r=======\rb\r'
b'>>>>>>> THIS\r'.splitlines(True),
- list(m_lines),
+ m_lines,
)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-single-head-obsolescence-named-branch-A1.t Fri Feb 18 14:27:43 2022 +0100
@@ -0,0 +1,114 @@
+=========================================
+Testing single head enforcement: Case A-1
+=========================================
+
+A repository is set to only accept a single head per name (typically named
+branch). However, obsolete changesets can make this enforcement more
+complicated, because they can be kept visible by other changesets on other
+branches.
+
+This case is part of a series of tests checking this behavior.
+
+Category A: Involving obsolescence
+TestCase 1: A fully obsolete branch kept visible by another one
+
+.. old-state:
+..
+.. * 2 changesets on branch default
+.. * 2 changesets on branch Z on top of them
+..
+.. new-state:
+..
+.. * 2 changesets on branch Z at the same location
+.. * 2 changesets on branch default superseding the other ones
+..
+.. expected-result:
+..
+.. * only one head detected
+..
+.. graph-summary:
+..
+.. D ● (branch Z)
+.. |
+.. C ● (branch Z)
+.. |
+.. B ø⇠◔ B'
+.. | |
+.. A ø⇠◔ A'
+.. |/
+.. ●
+
+ $ . $TESTDIR/testlib/push-checkheads-util.sh
+
+ $ cat >> $HGRCPATH << EOF
+ > [command-templates]
+ > log = "{node|short} [{branch}] ({phase}): {desc}\n"
+ > EOF
+
+Test setup
+----------
+
+ $ mkdir A1
+ $ cd A1
+ $ setuprepos single-head
+ creating basic server and client repo
+ updating to branch default
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd client
+ $ mkcommit B0
+ $ hg branch Z
+ marked working directory as branch Z
+ (branches are permanent and global, did you want a bookmark?)
+ $ mkcommit C0
+ $ mkcommit D0
+ $ hg push --new-branch
+ pushing to $TESTTMP/A1/server
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 3 changesets with 3 changes to 3 files
+ $ hg up 0
+ 0 files updated, 0 files merged, 4 files removed, 0 files unresolved
+ $ mkcommit A1
+ created new head
+ $ mkcommit B1
+ $ hg debugobsolete `getid "desc(A0)"` `getid "desc(A1)"`
+ 1 new obsolescence markers
+ obsoleted 1 changesets
+ 3 new orphan changesets
+ $ hg debugobsolete `getid "desc(B0)"` `getid "desc(B1)"`
+ 1 new obsolescence markers
+ obsoleted 1 changesets
+ $ hg heads
+ 262c8c798096 [default] (draft): B1
+ cdf1dbb37a67 [Z] (draft): D0
+ $ hg log -G --hidden
+ @ 262c8c798096 [default] (draft): B1
+ |
+ o f6082bc4ffef [default] (draft): A1
+ |
+ | * cdf1dbb37a67 [Z] (draft): D0
+ | |
+ | * 3213e3e16c67 [Z] (draft): C0
+ | |
+ | x d73caddc5533 [default] (draft): B0
+ | |
+ | x 8aaa48160adc [default] (draft): A0
+ |/
+ o 1e4be0697311 [default] (public): root
+
+
+Actual testing
+--------------
+
+ $ hg push -r 'desc("B1")'
+ pushing to $TESTTMP/A1/server
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 2 changesets with 2 changes to 2 files (+1 heads)
+ 2 new obsolescence markers
+ obsoleted 2 changesets
+ 2 new orphan changesets
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-single-head-obsolescence-named-branch-A2.t Fri Feb 18 14:27:43 2022 +0100
@@ -0,0 +1,113 @@
+=========================================
+Testing single head enforcement: Case A-2
+=========================================
+
+A repository is set to only accept a single head per name (typically named
+branch). However, obsolete changesets can make this enforcement more
+complicated, because they can be kept visible by other changesets on other
+branches.
+
+This case is part of a series of tests checking this behavior.
+
+Category A: Involving obsolescence
+TestCase 2: A branch is split in two, effectively creating two heads
+
+.. old-state:
+..
+.. * 2 changesets on branch default
+.. * 2 changesets on branch Z on top of them
+..
+.. new-state:
+..
+.. * 2 changesets on branch Z at the same location
+.. * 1 changeset on branch default unchanged
+.. * 1 changeset on branch default superseding the other ones
+..
+.. expected-result:
+..
+.. * two heads detected
+..
+.. graph-summary:
+..
+.. D ● (branch Z)
+.. |
+.. C ● (branch Z)
+.. |
+.. B ø⇠◔ B'
+.. | |
+.. A ● |
+.. |/
+.. ●
+
+ $ . $TESTDIR/testlib/push-checkheads-util.sh
+
+ $ cat >> $HGRCPATH << EOF
+ > [command-templates]
+ > log = "{node|short} [{branch}] ({phase}): {desc}\n"
+ > EOF
+
+Test setup
+----------
+
+ $ mkdir A2
+ $ cd A2
+ $ setuprepos single-head
+ creating basic server and client repo
+ updating to branch default
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd client
+ $ mkcommit B0
+ $ hg branch Z
+ marked working directory as branch Z
+ (branches are permanent and global, did you want a bookmark?)
+ $ mkcommit C0
+ $ mkcommit D0
+ $ hg push --new-branch
+ pushing to $TESTTMP/A2/server
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 3 changesets with 3 changes to 3 files
+ $ hg up 0
+ 0 files updated, 0 files merged, 4 files removed, 0 files unresolved
+ $ mkcommit B1
+ created new head
+ $ hg debugobsolete `getid "desc(B0)"` `getid "desc(B1)"`
+ 1 new obsolescence markers
+ obsoleted 1 changesets
+ 2 new orphan changesets
+ $ hg heads
+ 25c56d33e4c4 [default] (draft): B1
+ cdf1dbb37a67 [Z] (draft): D0
+ 8aaa48160adc [default] (draft): A0
+ $ hg log -G --hidden
+ @ 25c56d33e4c4 [default] (draft): B1
+ |
+ | * cdf1dbb37a67 [Z] (draft): D0
+ | |
+ | * 3213e3e16c67 [Z] (draft): C0
+ | |
+ | x d73caddc5533 [default] (draft): B0
+ | |
+ | o 8aaa48160adc [default] (draft): A0
+ |/
+ o 1e4be0697311 [default] (public): root
+
+
+Actual testing
+--------------
+
+(force push to make sure we get the changeset on the remote)
+
+ $ hg push -r 'desc("B1")' --force
+ pushing to $TESTTMP/A2/server
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ transaction abort!
+ rollback completed
+ abort: rejecting multiple heads on branch "default"
+ (2 heads: 8aaa48160adc 25c56d33e4c4)
+ [255]
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-single-head-obsolescence-named-branch-A3.t Fri Feb 18 14:27:43 2022 +0100
@@ -0,0 +1,120 @@
+=========================================
+Testing single head enforcement: Case A-3
+=========================================
+
+A repository is set to only accept a single head per name (typically named
+branch). However, obsolete changesets can make this enforcement more
+complicated, because they can be kept visible by other changeset on other
+branch.
+
+This case is part of a series of tests checking this behavior.
+
+Category A: Involving obsolescence
+TestCase 3: Full superseding of a branch interleaved with another
+
+.. old-state:
+..
+.. * 2 changesets on branch default
+.. * 2 changesets on branch Z interleaved with the other
+..
+.. new-state:
+..
+.. * 2 changesets on branch Z at the same location
+.. * 2 changesets on branch default superseding the other ones
+..
+.. expected-result:
+..
+.. * only one head detected
+..
+.. graph-summary:
+..
+.. D ● (branch Z)
+.. |
+.. C ø⇠◔ C'
+.. | |
+.. B ● | (branch Z)
+.. | |
+.. A ø⇠◔ A'
+.. |/
+.. ●
+
+ $ . $TESTDIR/testlib/push-checkheads-util.sh
+
+ $ cat >> $HGRCPATH << EOF
+ > [command-templates]
+ > log = "{node|short} [{branch}] ({phase}): {desc}\n"
+ > EOF
+
+Test setup
+----------
+
+ $ mkdir A3
+ $ cd A3
+ $ setuprepos single-head
+ creating basic server and client repo
+ updating to branch default
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd client
+ $ hg branch Z
+ marked working directory as branch Z
+ (branches are permanent and global, did you want a bookmark?)
+ $ mkcommit B0
+ $ hg branch default --force
+ marked working directory as branch default
+ $ mkcommit C0
+ created new head
+ $ hg branch Z --force
+ marked working directory as branch Z
+ $ mkcommit D0
+ created new head
+ $ hg push --new-branch
+ pushing to $TESTTMP/A3/server
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 3 changesets with 3 changes to 3 files
+ $ hg up 0
+ 0 files updated, 0 files merged, 4 files removed, 0 files unresolved
+ $ mkcommit A1
+ created new head
+ $ mkcommit C1
+ $ hg debugobsolete `getid "desc(A0)"` `getid "desc(A1)"`
+ 1 new obsolescence markers
+ obsoleted 1 changesets
+ 3 new orphan changesets
+ $ hg debugobsolete `getid "desc(C0)"` `getid "desc(C1)"`
+ 1 new obsolescence markers
+ obsoleted 1 changesets
+ $ hg heads
+ 0c76bc104656 [default] (draft): C1
+ 78578c4306ce [Z] (draft): D0
+ $ hg log -G --hidden
+ @ 0c76bc104656 [default] (draft): C1
+ |
+ o f6082bc4ffef [default] (draft): A1
+ |
+ | * 78578c4306ce [Z] (draft): D0
+ | |
+ | x afc55ba2ce61 [default] (draft): C0
+ | |
+ | * 93e5c1321ece [Z] (draft): B0
+ | |
+ | x 8aaa48160adc [default] (draft): A0
+ |/
+ o 1e4be0697311 [default] (public): root
+
+
+Actual testing
+--------------
+
+ $ hg push -r 'desc("C1")'
+ pushing to $TESTTMP/A3/server
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 2 changesets with 2 changes to 2 files (+1 heads)
+ 2 new obsolescence markers
+ obsoleted 2 changesets
+ 2 new orphan changesets
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-single-head-obsolescence-named-branch-A4.t Fri Feb 18 14:27:43 2022 +0100
@@ -0,0 +1,117 @@
+=========================================
+Testing single head enforcement: Case A-4
+=========================================
+
+A repository is set to only accept a single head per name (typically named
+branch). However, obsolete changesets can make this enforcement more
+complicated, because they can be kept visible by other changeset on other
+branch.
+
+This case is part of a series of tests checking this behavior.
+
+Category A: Involving obsolescence
+TestCase 4: Partial rewrite of a branch to deinterleave it
+
+.. old-state:
+..
+.. * 2 changesets on branch default
+.. * 2 changesets on branch Z interleaved with the other one
+..
+.. new-state:
+..
+.. * 2 changesets on branch Z at the same location
+.. * 1 changeset on default untouched (the lower one)
+.. * 1 changeset on default moved on the other one
+..
+.. expected-result:
+..
+.. * only one head detected
+..
+.. graph-summary:
+..
+.. D ● (branch Z)
+.. |
+.. C ø⇠◔ C'
+.. | |
+.. B ● | (branch Z)
+.. |/
+.. A ●
+.. |
+.. ●
+
+ $ . $TESTDIR/testlib/push-checkheads-util.sh
+
+ $ cat >> $HGRCPATH << EOF
+ > [command-templates]
+ > log = "{node|short} [{branch}] ({phase}): {desc}\n"
+ > EOF
+
+Test setup
+----------
+
+ $ mkdir A4
+ $ cd A4
+ $ setuprepos single-head
+ creating basic server and client repo
+ updating to branch default
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd client
+ $ hg branch Z
+ marked working directory as branch Z
+ (branches are permanent and global, did you want a bookmark?)
+ $ mkcommit B0
+ $ hg branch default --force
+ marked working directory as branch default
+ $ mkcommit C0
+ created new head
+ $ hg branch Z --force
+ marked working directory as branch Z
+ $ mkcommit D0
+ created new head
+ $ hg push --new-branch
+ pushing to $TESTTMP/A4/server
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 3 changesets with 3 changes to 3 files
+ $ hg up 'desc("A0")'
+ 0 files updated, 0 files merged, 3 files removed, 0 files unresolved
+ $ mkcommit C1
+ created new head
+ $ hg debugobsolete `getid "desc(C0)"` `getid "desc(C1)"`
+ 1 new obsolescence markers
+ obsoleted 1 changesets
+ 1 new orphan changesets
+ $ hg heads
+ cfe9ed94fa4a [default] (draft): C1
+ 78578c4306ce [Z] (draft): D0
+ $ hg log -G --hidden
+ @ cfe9ed94fa4a [default] (draft): C1
+ |
+ | * 78578c4306ce [Z] (draft): D0
+ | |
+ | x afc55ba2ce61 [default] (draft): C0
+ | |
+ | o 93e5c1321ece [Z] (draft): B0
+ |/
+ o 8aaa48160adc [default] (draft): A0
+ |
+ o 1e4be0697311 [default] (public): root
+
+
+Actual testing
+--------------
+
+(force push to make sure we get the changeset on the remote)
+
+ $ hg push -r 'desc("C1")' --force
+ pushing to $TESTTMP/A4/server
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 1 changes to 1 files (+1 heads)
+ 1 new obsolescence markers
+ obsoleted 1 changesets
+ 1 new orphan changesets
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-single-head-obsolescence-named-branch-A5.t Fri Feb 18 14:27:43 2022 +0100
@@ -0,0 +1,108 @@
+=========================================
+Testing single head enforcement: Case A-5
+=========================================
+
+A repository is set to only accept a single head per name (typically named
+branch). However, obsolete changesets can make this enforcement more
+complicated, because they can be kept visible by other changesets on other
+branches.
+
+This case is part of a series of tests checking this behavior.
+
+Category A: Involving obsolescence
+TestCase 5: Obsoleting a merge reveals two heads
+
+.. old-state:
+..
+.. * 3 changesets on branch default (2 on their own branch + 1 merge)
+.. * 1 changeset on branch Z (children of the merge)
+..
+.. new-state:
+..
+.. * 2 changesets on branch default (merge is obsolete) each a head
+.. * 1 changeset on branch Z keeping the merge visible
+..
+.. expected-result:
+..
+.. * 2 heads detected (because we skip the merge)
+..
+.. graph-summary:
+..
+.. C ● (branch Z)
+.. |
+.. M ⊗
+.. |\
+.. A ● ● B
+.. |/
+.. ●
+
+ $ . $TESTDIR/testlib/push-checkheads-util.sh
+
+ $ cat >> $HGRCPATH << EOF
+ > [command-templates]
+ > log = "{node|short} [{branch}] ({phase}): {desc}\n"
+ > EOF
+
+Test setup
+----------
+
+ $ mkdir A5
+ $ cd A5
+ $ setuprepos single-head
+ creating basic server and client repo
+ updating to branch default
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd client
+ $ hg up 0
+ 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ $ mkcommit B0
+ created new head
+ $ hg merge
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ hg ci -m 'M0'
+ $ hg branch Z
+ marked working directory as branch Z
+ (branches are permanent and global, did you want a bookmark?)
+ $ mkcommit C0
+ $ hg push --new-branch
+ pushing to $TESTTMP/A5/server
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 3 changesets with 2 changes to 2 files
+ $ hg debugobsolete `getid "desc(M0)"` --record-parents
+ 1 new obsolescence markers
+ obsoleted 1 changesets
+ 1 new orphan changesets
+ $ hg heads
+ 61c95483cc12 [Z] (draft): C0
+ 74ff5441d343 [default] (draft): B0
+ 8aaa48160adc [default] (draft): A0
+ $ hg log -G --hidden
+ @ 61c95483cc12 [Z] (draft): C0
+ |
+ x 14d3d4d41d1a [default] (draft): M0
+ |\
+ | o 74ff5441d343 [default] (draft): B0
+ | |
+ o | 8aaa48160adc [default] (draft): A0
+ |/
+ o 1e4be0697311 [default] (public): root
+
+
+Actual testing
+--------------
+
+(force push to make sure we get the changeset on the remote)
+
+ $ hg push -r 'desc("C0")' --force
+ pushing to $TESTTMP/A5/server
+ searching for changes
+ no changes found
+ transaction abort!
+ rollback completed
+ abort: rejecting multiple heads on branch "default"
+ (2 heads: 8aaa48160adc 74ff5441d343)
+ [255]
--- a/tests/test-sparse-clear.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-sparse-clear.t Fri Feb 18 14:27:43 2022 +0100
@@ -42,7 +42,7 @@
Clear rules when there are excludes
- $ hg debugsparse --exclude *.sparse
+ $ hg debugsparse -X base.sparse -X webpage.sparse
$ ls -A
.hg
data.py
--- a/tests/test-sparse-profiles.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-sparse-profiles.t Fri Feb 18 14:27:43 2022 +0100
@@ -128,8 +128,8 @@
$ hg merge 1
temporarily included 2 file(s) in the sparse checkout for merging
merging backend.sparse
+ warning: conflicts while merging backend.sparse! (edit, then use 'hg resolve --mark')
merging data.py
- warning: conflicts while merging backend.sparse! (edit, then use 'hg resolve --mark')
warning: conflicts while merging data.py! (edit, then use 'hg resolve --mark')
0 files updated, 0 files merged, 0 files removed, 2 files unresolved
use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
@@ -197,8 +197,8 @@
rebasing 1:a2b1de640a62 "edit profile"
temporarily included 2 file(s) in the sparse checkout for merging
merging backend.sparse
+ warning: conflicts while merging backend.sparse! (edit, then use 'hg resolve --mark')
merging data.py
- warning: conflicts while merging backend.sparse! (edit, then use 'hg resolve --mark')
warning: conflicts while merging data.py! (edit, then use 'hg resolve --mark')
unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[240]
--- a/tests/test-sparse-requirement.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-sparse-requirement.t Fri Feb 18 14:27:43 2022 +0100
@@ -16,7 +16,7 @@
Enable sparse profile
- $ cat .hg/requires
+ $ hg debugrequires
dotencode
dirstate-v2 (dirstate-v2 !)
fncache
@@ -24,6 +24,7 @@
persistent-nodemap (rust !)
revlog-compression-zstd (zstd !)
revlogv1
+ share-safe
sparserevlog
store
testonly-simplestore (reposimplestore !)
@@ -36,7 +37,7 @@
Requirement for sparse added when sparse is enabled
- $ cat .hg/requires
+ $ hg debugrequires --config extensions.sparse=
dotencode
dirstate-v2 (dirstate-v2 !)
exp-sparse
@@ -45,6 +46,7 @@
persistent-nodemap (rust !)
revlog-compression-zstd (zstd !)
revlogv1
+ share-safe
sparserevlog
store
testonly-simplestore (reposimplestore !)
@@ -59,7 +61,7 @@
$ hg debugsparse --reset --config extensions.sparse=
- $ cat .hg/requires
+ $ hg debugrequires
dotencode
dirstate-v2 (dirstate-v2 !)
fncache
@@ -67,6 +69,7 @@
persistent-nodemap (rust !)
revlog-compression-zstd (zstd !)
revlogv1
+ share-safe
sparserevlog
store
testonly-simplestore (reposimplestore !)
--- a/tests/test-sparse.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-sparse.t Fri Feb 18 14:27:43 2022 +0100
@@ -147,7 +147,7 @@
Verify deleting sparseness with --force brings back files
- $ hg debugsparse --delete -f 'show*'
+ $ hg debugsparse -f --delete 'show*'
pending changes to 'hide'
$ ls -A
.hg
@@ -170,7 +170,7 @@
Verify adding sparseness hides files
- $ hg debugsparse --exclude -f 'hide*'
+ $ hg debugsparse -f --exclude 'hide*'
pending changes to 'hide'
$ ls -A
.hg
@@ -254,6 +254,15 @@
hide*
+Multiple -I and -X can be passed at once
+
+ $ hg debugsparse --reset -I '*2' -X 'hide2'
+ $ ls -A
+ .hg
+ hide.orig
+ show2
+ $ hg debugsparse --reset -X 'hide*'
+
Verify strip -k resets dirstate correctly
$ hg status
--- a/tests/test-sqlitestore.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-sqlitestore.t Fri Feb 18 14:27:43 2022 +0100
@@ -13,7 +13,7 @@
New repo should not use SQLite by default
$ hg init empty-no-sqlite
- $ cat empty-no-sqlite/.hg/requires
+ $ hg debugrequires -R empty-no-sqlite
dotencode
dirstate-v2 (dirstate-v2 !)
fncache
@@ -21,13 +21,14 @@
persistent-nodemap (rust !)
revlog-compression-zstd (zstd !)
revlogv1
+ share-safe
sparserevlog
store
storage.new-repo-backend=sqlite is recognized
$ hg --config storage.new-repo-backend=sqlite init empty-sqlite
- $ cat empty-sqlite/.hg/requires
+ $ hg debugrequires -R empty-sqlite
dotencode
dirstate-v2 (dirstate-v2 !)
exp-sqlite-001
@@ -38,6 +39,7 @@
persistent-nodemap (rust !)
revlog-compression-zstd (zstd !)
revlogv1
+ share-safe
sparserevlog
store
@@ -49,7 +51,7 @@
Can force compression to zlib
$ hg --config storage.sqlite.compression=zlib init empty-zlib
- $ cat empty-zlib/.hg/requires
+ $ hg debugrequires -R empty-zlib
dotencode
dirstate-v2 (dirstate-v2 !)
exp-sqlite-001
@@ -59,13 +61,14 @@
persistent-nodemap (rust !)
revlog-compression-zstd (zstd !)
revlogv1
+ share-safe
sparserevlog
store
Can force compression to none
$ hg --config storage.sqlite.compression=none init empty-none
- $ cat empty-none/.hg/requires
+ $ hg debugrequires -R empty-none
dotencode
dirstate-v2 (dirstate-v2 !)
exp-sqlite-001
@@ -75,6 +78,7 @@
persistent-nodemap (rust !)
revlog-compression-zstd (zstd !)
revlogv1
+ share-safe
sparserevlog
store
--- a/tests/test-ssh-bundle1.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-ssh-bundle1.t Fri Feb 18 14:27:43 2022 +0100
@@ -1,16 +1,6 @@
This test is a duplicate of 'test-http.t' feel free to factor out
parts that are not bundle1/bundle2 specific.
-#testcases sshv1 sshv2
-
-#if sshv2
- $ cat >> $HGRCPATH << EOF
- > [experimental]
- > sshpeer.advertise-v2 = true
- > sshserver.support-v2 = true
- > EOF
-#endif
-
$ cat << EOF >> $HGRCPATH
> [devel]
> # This test is dedicated to interaction through old bundle
@@ -483,15 +473,11 @@
$ hg pull --debug ssh://user@dummy/remote
pulling from ssh://user@dummy/remote
running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R remote serve --stdio['"] (re)
- sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
sending hello command
sending between command
- remote: 444 (sshv1 no-rust !)
- remote: 463 (sshv1 rust !)
- protocol upgraded to exp-ssh-v2-0003 (sshv2 !)
- remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (no-rust !)
- remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,persistent-nodemap,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (rust !)
- remote: 1 (sshv1 !)
+ remote: \d+ (re)
+ remote: capabilities: batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (re)
+ remote: 1
sending protocaps command
preparing listkeys for "bookmarks"
sending listkeys command
--- a/tests/test-ssh-clone-r.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-ssh-clone-r.t Fri Feb 18 14:27:43 2022 +0100
@@ -1,15 +1,5 @@
This test tries to exercise the ssh functionality with a dummy script
-#testcases sshv1 sshv2
-
-#if sshv2
- $ cat >> $HGRCPATH << EOF
- > [experimental]
- > sshpeer.advertise-v2 = true
- > sshserver.support-v2 = true
- > EOF
-#endif
-
creating 'remote' repo
$ hg init remote
--- a/tests/test-ssh-proto-unbundle.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-ssh-proto-unbundle.t Fri Feb 18 14:27:43 2022 +0100
@@ -5,13 +5,6 @@
> use-persistent-nodemap = no
> EOF
- $ cat > hgrc-sshv2 << EOF
- > %include $HGRCPATH
- > [experimental]
- > sshpeer.advertise-v2 = true
- > sshserver.support-v2 = true
- > EOF
-
$ debugwireproto() {
> commands=`cat -`
> echo 'testing ssh1'
@@ -20,12 +13,6 @@
> if [ -n "$1" ]; then
> hg --config extensions.strip= strip --no-backup -r "all() - ::${tip}"
> fi
- > echo ""
- > echo 'testing ssh2'
- > echo "${commands}" | HGRCPATH=$TESTTMP/hgrc-sshv2 hg --verbose debugwireproto --localssh --noreadstderr
- > if [ -n "$1" ]; then
- > hg --config extensions.strip= strip --no-backup -r "all() - ::${tip}"
- > fi
> }
Generate some bundle files
@@ -63,9 +50,9 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 444\n
- o> readline() -> 444:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> \d+\\n (re)
+ o> readline\(\) -> \d+: (re)
+ o> capabilities: batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\\n (re)
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -103,56 +90,6 @@
e> read(-1) -> 115:
e> abort: incompatible Mercurial client; bundle2 required\n
e> (see https://www.mercurial-scm.org/wiki/IncompatibleClient)\n
-
- testing ssh2
- creating ssh peer from handshake results
- i> write(171) -> 171:
- i> upgrade * proto=exp-ssh-v2-0003\n (glob)
- i> hello\n
- i> between\n
- i> pairs 81\n
- i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
- i> flush() -> None
- o> readline() -> 62:
- o> upgraded * exp-ssh-v2-0003\n (glob)
- o> readline() -> 4:
- o> 443\n
- o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
- o> read(1) -> 1:
- o> \n
- sending unbundle command
- i> write(9) -> 9:
- i> unbundle\n
- i> write(9) -> 9:
- i> heads 10\n
- i> write(10) -> 10: 666f726365
- i> flush() -> None
- o> readline() -> 2:
- o> 0\n
- i> write(4) -> 4:
- i> 426\n
- i> write(426) -> 426:
- i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n
- i> test\n
- i> 0 0\n
- i> foo\n
- i> \n
- i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n
- i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n
- i> \x00\x00\x00\x00\x00\x00\x00\x00
- i> write(2) -> 2:
- i> 0\n
- i> flush() -> None
- o> readline() -> 2:
- o> 0\n
- o> readline() -> 2:
- o> 1\n
- o> read(1) -> 1: 0
- result: 0
- remote output:
- e> read(-1) -> 115:
- e> abort: incompatible Mercurial client; bundle2 required\n
- e> (see https://www.mercurial-scm.org/wiki/IncompatibleClient)\n
$ cd ..
@@ -242,9 +179,9 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 444\n
- o> readline() -> 444:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> \d+\\n (re)
+ o> readline\(\) -> \d+: (re)
+ o> capabilities: batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\\n (re)
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -287,61 +224,6 @@
e> transaction abort!\n
e> rollback completed\n
e> abort: pretxnchangegroup.fail hook failed\n
-
- testing ssh2
- creating ssh peer from handshake results
- i> write(171) -> 171:
- i> upgrade * proto=exp-ssh-v2-0003\n (glob)
- i> hello\n
- i> between\n
- i> pairs 81\n
- i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
- i> flush() -> None
- o> readline() -> 62:
- o> upgraded * exp-ssh-v2-0003\n (glob)
- o> readline() -> 4:
- o> 443\n
- o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
- o> read(1) -> 1:
- o> \n
- sending unbundle command
- i> write(9) -> 9:
- i> unbundle\n
- i> write(9) -> 9:
- i> heads 10\n
- i> write(10) -> 10: 666f726365
- i> flush() -> None
- o> readline() -> 2:
- o> 0\n
- i> write(4) -> 4:
- i> 426\n
- i> write(426) -> 426:
- i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n
- i> test\n
- i> 0 0\n
- i> foo\n
- i> \n
- i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n
- i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n
- i> \x00\x00\x00\x00\x00\x00\x00\x00
- i> write(2) -> 2:
- i> 0\n
- i> flush() -> None
- o> readline() -> 2:
- o> 0\n
- o> readline() -> 2:
- o> 1\n
- o> read(1) -> 1: 0
- result: 0
- remote output:
- e> read(-1) -> 151:
- e> adding changesets\n
- e> adding manifests\n
- e> adding file changes\n
- e> ui.write 1 line\n
- e> transaction abort!\n
- e> rollback completed\n
- e> abort: pretxnchangegroup.fail hook failed\n
And a variation that writes multiple lines using ui.write
@@ -365,10 +247,10 @@
i> pairs 81\n
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
- o> readline() -> 4:
- o> 444\n
- o> readline() -> 444:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> readline\(\) -> \d+: (re)
+ o> \d+\\n (re)
+ o> readline\(\) -> \d+: (re)
+ o> capabilities: batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\\n (re)
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -412,62 +294,6 @@
e> transaction abort!\n
e> rollback completed\n
e> abort: pretxnchangegroup.fail hook failed\n
-
- testing ssh2
- creating ssh peer from handshake results
- i> write(171) -> 171:
- i> upgrade * proto=exp-ssh-v2-0003\n (glob)
- i> hello\n
- i> between\n
- i> pairs 81\n
- i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
- i> flush() -> None
- o> readline() -> 62:
- o> upgraded * exp-ssh-v2-0003\n (glob)
- o> readline() -> 4:
- o> 443\n
- o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
- o> read(1) -> 1:
- o> \n
- sending unbundle command
- i> write(9) -> 9:
- i> unbundle\n
- i> write(9) -> 9:
- i> heads 10\n
- i> write(10) -> 10: 666f726365
- i> flush() -> None
- o> readline() -> 2:
- o> 0\n
- i> write(4) -> 4:
- i> 426\n
- i> write(426) -> 426:
- i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n
- i> test\n
- i> 0 0\n
- i> foo\n
- i> \n
- i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n
- i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n
- i> \x00\x00\x00\x00\x00\x00\x00\x00
- i> write(2) -> 2:
- i> 0\n
- i> flush() -> None
- o> readline() -> 2:
- o> 0\n
- o> readline() -> 2:
- o> 1\n
- o> read(1) -> 1: 0
- result: 0
- remote output:
- e> read(-1) -> 173:
- e> adding changesets\n
- e> adding manifests\n
- e> adding file changes\n
- e> ui.write 2 lines 1\n
- e> ui.write 2 lines 2\n
- e> transaction abort!\n
- e> rollback completed\n
- e> abort: pretxnchangegroup.fail hook failed\n
And a variation that does a ui.flush() after writing output
@@ -492,9 +318,9 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 444\n
- o> readline() -> 444:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> \d+\\n (re)
+ o> readline\(\) -> \d+: (re)
+ o> capabilities: batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\\n (re)
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -537,61 +363,6 @@
e> transaction abort!\n
e> rollback completed\n
e> abort: pretxnchangegroup.fail hook failed\n
-
- testing ssh2
- creating ssh peer from handshake results
- i> write(171) -> 171:
- i> upgrade * proto=exp-ssh-v2-0003\n (glob)
- i> hello\n
- i> between\n
- i> pairs 81\n
- i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
- i> flush() -> None
- o> readline() -> 62:
- o> upgraded * exp-ssh-v2-0003\n (glob)
- o> readline() -> 4:
- o> 443\n
- o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
- o> read(1) -> 1:
- o> \n
- sending unbundle command
- i> write(9) -> 9:
- i> unbundle\n
- i> write(9) -> 9:
- i> heads 10\n
- i> write(10) -> 10: 666f726365
- i> flush() -> None
- o> readline() -> 2:
- o> 0\n
- i> write(4) -> 4:
- i> 426\n
- i> write(426) -> 426:
- i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n
- i> test\n
- i> 0 0\n
- i> foo\n
- i> \n
- i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n
- i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n
- i> \x00\x00\x00\x00\x00\x00\x00\x00
- i> write(2) -> 2:
- i> 0\n
- i> flush() -> None
- o> readline() -> 2:
- o> 0\n
- o> readline() -> 2:
- o> 1\n
- o> read(1) -> 1: 0
- result: 0
- remote output:
- e> read(-1) -> 157:
- e> adding changesets\n
- e> adding manifests\n
- e> adding file changes\n
- e> ui.write 1 line flush\n
- e> transaction abort!\n
- e> rollback completed\n
- e> abort: pretxnchangegroup.fail hook failed\n
Multiple writes + flush
@@ -616,9 +387,9 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 444\n
- o> readline() -> 444:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> \d+\\n (re)
+ o> readline\(\) -> \d+: (re)
+ o> capabilities: batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\\n (re)
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -662,62 +433,6 @@
e> transaction abort!\n
e> rollback completed\n
e> abort: pretxnchangegroup.fail hook failed\n
-
- testing ssh2
- creating ssh peer from handshake results
- i> write(171) -> 171:
- i> upgrade * proto=exp-ssh-v2-0003\n (glob)
- i> hello\n
- i> between\n
- i> pairs 81\n
- i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
- i> flush() -> None
- o> readline() -> 62:
- o> upgraded * exp-ssh-v2-0003\n (glob)
- o> readline() -> 4:
- o> 443\n
- o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
- o> read(1) -> 1:
- o> \n
- sending unbundle command
- i> write(9) -> 9:
- i> unbundle\n
- i> write(9) -> 9:
- i> heads 10\n
- i> write(10) -> 10: 666f726365
- i> flush() -> None
- o> readline() -> 2:
- o> 0\n
- i> write(4) -> 4:
- i> 426\n
- i> write(426) -> 426:
- i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n
- i> test\n
- i> 0 0\n
- i> foo\n
- i> \n
- i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n
- i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n
- i> \x00\x00\x00\x00\x00\x00\x00\x00
- i> write(2) -> 2:
- i> 0\n
- i> flush() -> None
- o> readline() -> 2:
- o> 0\n
- o> readline() -> 2:
- o> 1\n
- o> read(1) -> 1: 0
- result: 0
- remote output:
- e> read(-1) -> 161:
- e> adding changesets\n
- e> adding manifests\n
- e> adding file changes\n
- e> ui.write 1st\n
- e> ui.write 2nd\n
- e> transaction abort!\n
- e> rollback completed\n
- e> abort: pretxnchangegroup.fail hook failed\n
ui.write() + ui.write_err() output is captured
@@ -742,9 +457,9 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 444\n
- o> readline() -> 444:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> \d+\\n (re)
+ o> readline\(\) -> \d+: (re)
+ o> capabilities: batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\\n (re)
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -790,64 +505,7 @@
e> transaction abort!\n
e> rollback completed\n
e> abort: pretxnchangegroup.fail hook failed\n
-
- testing ssh2
- creating ssh peer from handshake results
- i> write(171) -> 171:
- i> upgrade * proto=exp-ssh-v2-0003\n (glob)
- i> hello\n
- i> between\n
- i> pairs 81\n
- i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
- i> flush() -> None
- o> readline() -> 62:
- o> upgraded * exp-ssh-v2-0003\n (glob)
- o> readline() -> 4:
- o> 443\n
- o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
- o> read(1) -> 1:
- o> \n
- sending unbundle command
- i> write(9) -> 9:
- i> unbundle\n
- i> write(9) -> 9:
- i> heads 10\n
- i> write(10) -> 10: 666f726365
- i> flush() -> None
- o> readline() -> 2:
- o> 0\n
- i> write(4) -> 4:
- i> 426\n
- i> write(426) -> 426:
- i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n
- i> test\n
- i> 0 0\n
- i> foo\n
- i> \n
- i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n
- i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n
- i> \x00\x00\x00\x00\x00\x00\x00\x00
- i> write(2) -> 2:
- i> 0\n
- i> flush() -> None
- o> readline() -> 2:
- o> 0\n
- o> readline() -> 2:
- o> 1\n
- o> read(1) -> 1: 0
- result: 0
- remote output:
- e> read(-1) -> 187:
- e> adding changesets\n
- e> adding manifests\n
- e> adding file changes\n
- e> ui.write 1\n
- e> ui.write_err 1\n
- e> ui.write 2\n
- e> ui.write_err 2\n
- e> transaction abort!\n
- e> rollback completed\n
- e> abort: pretxnchangegroup.fail hook failed\n
+
print() output is captured
@@ -872,9 +530,9 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 444\n
- o> readline() -> 444:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> \d+\\n (re)
+ o> readline\(\) -> \d+: (re)
+ o> capabilities: batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\\n (re)
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -917,61 +575,6 @@
e> transaction abort!\n
e> rollback completed\n
e> abort: pretxnchangegroup.fail hook failed\n
-
- testing ssh2
- creating ssh peer from handshake results
- i> write(171) -> 171:
- i> upgrade * proto=exp-ssh-v2-0003\n (glob)
- i> hello\n
- i> between\n
- i> pairs 81\n
- i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
- i> flush() -> None
- o> readline() -> 62:
- o> upgraded * exp-ssh-v2-0003\n (glob)
- o> readline() -> 4:
- o> 443\n
- o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
- o> read(1) -> 1:
- o> \n
- sending unbundle command
- i> write(9) -> 9:
- i> unbundle\n
- i> write(9) -> 9:
- i> heads 10\n
- i> write(10) -> 10: 666f726365
- i> flush() -> None
- o> readline() -> 2:
- o> 0\n
- i> write(4) -> 4:
- i> 426\n
- i> write(426) -> 426:
- i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n
- i> test\n
- i> 0 0\n
- i> foo\n
- i> \n
- i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n
- i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n
- i> \x00\x00\x00\x00\x00\x00\x00\x00
- i> write(2) -> 2:
- i> 0\n
- i> flush() -> None
- o> readline() -> 2:
- o> 0\n
- o> readline() -> 2:
- o> 1\n
- o> read(1) -> 1: 0
- result: 0
- remote output:
- e> read(-1) -> 148:
- e> adding changesets\n
- e> adding manifests\n
- e> adding file changes\n
- e> printed line\n
- e> transaction abort!\n
- e> rollback completed\n
- e> abort: pretxnchangegroup.fail hook failed\n
Mixed print() and ui.write() are both captured
@@ -996,9 +599,9 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 444\n
- o> readline() -> 444:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> \d+\\n (re)
+ o> readline\(\) -> \d+: (re)
+ o> capabilities: batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\\n (re)
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -1044,64 +647,6 @@
e> transaction abort!\n
e> rollback completed\n
e> abort: pretxnchangegroup.fail hook failed\n
-
- testing ssh2
- creating ssh peer from handshake results
- i> write(171) -> 171:
- i> upgrade * proto=exp-ssh-v2-0003\n (glob)
- i> hello\n
- i> between\n
- i> pairs 81\n
- i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
- i> flush() -> None
- o> readline() -> 62:
- o> upgraded * exp-ssh-v2-0003\n (glob)
- o> readline() -> 4:
- o> 443\n
- o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
- o> read(1) -> 1:
- o> \n
- sending unbundle command
- i> write(9) -> 9:
- i> unbundle\n
- i> write(9) -> 9:
- i> heads 10\n
- i> write(10) -> 10: 666f726365
- i> flush() -> None
- o> readline() -> 2:
- o> 0\n
- i> write(4) -> 4:
- i> 426\n
- i> write(426) -> 426:
- i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n
- i> test\n
- i> 0 0\n
- i> foo\n
- i> \n
- i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n
- i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n
- i> \x00\x00\x00\x00\x00\x00\x00\x00
- i> write(2) -> 2:
- i> 0\n
- i> flush() -> None
- o> readline() -> 2:
- o> 0\n
- o> readline() -> 2:
- o> 1\n
- o> read(1) -> 1: 0
- result: 0
- remote output:
- e> read(-1) -> 173:
- e> adding changesets\n
- e> adding manifests\n
- e> adding file changes\n
- e> print 1\n
- e> ui.write 1\n
- e> print 2\n
- e> ui.write 2\n
- e> transaction abort!\n
- e> rollback completed\n
- e> abort: pretxnchangegroup.fail hook failed\n
print() to stdout and stderr both get captured
@@ -1126,9 +671,9 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 444\n
- o> readline() -> 444:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> \d+\\n (re)
+ o> readline\(\) -> \d+: (re)
+ o> capabilities: batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\\n (re)
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -1174,64 +719,6 @@
e> transaction abort!\n
e> rollback completed\n
e> abort: pretxnchangegroup.fail hook failed\n
-
- testing ssh2
- creating ssh peer from handshake results
- i> write(171) -> 171:
- i> upgrade * proto=exp-ssh-v2-0003\n (glob)
- i> hello\n
- i> between\n
- i> pairs 81\n
- i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
- i> flush() -> None
- o> readline() -> 62:
- o> upgraded * exp-ssh-v2-0003\n (glob)
- o> readline() -> 4:
- o> 443\n
- o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
- o> read(1) -> 1:
- o> \n
- sending unbundle command
- i> write(9) -> 9:
- i> unbundle\n
- i> write(9) -> 9:
- i> heads 10\n
- i> write(10) -> 10: 666f726365
- i> flush() -> None
- o> readline() -> 2:
- o> 0\n
- i> write(4) -> 4:
- i> 426\n
- i> write(426) -> 426:
- i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n
- i> test\n
- i> 0 0\n
- i> foo\n
- i> \n
- i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n
- i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n
- i> \x00\x00\x00\x00\x00\x00\x00\x00
- i> write(2) -> 2:
- i> 0\n
- i> flush() -> None
- o> readline() -> 2:
- o> 0\n
- o> readline() -> 2:
- o> 1\n
- o> read(1) -> 1: 0
- result: 0
- remote output:
- e> read(-1) -> 171:
- e> adding changesets\n
- e> adding manifests\n
- e> adding file changes\n
- e> stdout 1\n
- e> stderr 1\n
- e> stdout 2\n
- e> stderr 2\n
- e> transaction abort!\n
- e> rollback completed\n
- e> abort: pretxnchangegroup.fail hook failed\n
Shell hook writing to stdout has output captured
@@ -1262,9 +749,9 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 444\n
- o> readline() -> 444:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> \d+\\n (re)
+ o> readline\(\) -> \d+: (re)
+ o> capabilities: batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\\n (re)
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -1308,63 +795,6 @@
e> transaction abort!\n
e> rollback completed\n
e> abort: pretxnchangegroup.fail hook exited with status 1\n
-
- testing ssh2
- creating ssh peer from handshake results
- i> write(171) -> 171:
- i> upgrade * proto=exp-ssh-v2-0003\n (glob)
- i> hello\n
- i> between\n
- i> pairs 81\n
- i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
- i> flush() -> None
- o> readline() -> 62:
- o> upgraded * exp-ssh-v2-0003\n (glob)
- o> readline() -> 4:
- o> 443\n
- o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
- o> read(1) -> 1:
- o> \n
- sending unbundle command
- i> write(9) -> 9:
- i> unbundle\n
- i> write(9) -> 9:
- i> heads 10\n
- i> write(10) -> 10: 666f726365
- i> flush() -> None
- o> readline() -> 2:
- o> 0\n
- i> write(4) -> 4:
- i> 426\n
- i> write(426) -> 426:
- i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n
- i> test\n
- i> 0 0\n
- i> foo\n
- i> \n
- i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n
- i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n
- i> \x00\x00\x00\x00\x00\x00\x00\x00
- i> write(2) -> 2:
- i> 0\n
- i> flush() -> None
- o> readline() -> 2:
- o> 0\n
- o> readline() -> 2:
- o> 1\n
- o> read(1) -> 1: 0
- result: 0
- remote output:
- e> read(-1) -> 167:
- e> adding changesets\n
- e> adding manifests\n
- e> adding file changes\n
- e> stdout 1\n
- e> stdout 2\n
- e> transaction abort!\n
- e> rollback completed\n
- e> abort: pretxnchangegroup.fail hook exited with status 1\n
-
Shell hook writing to stderr has output captured
$ cat > $TESTTMP/hook.sh << EOF
@@ -1389,9 +819,9 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 444\n
- o> readline() -> 444:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> \d+\\n (re)
+ o> readline\(\) -> \d+: (re)
+ o> capabilities: batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\\n (re)
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -1435,63 +865,6 @@
e> transaction abort!\n
e> rollback completed\n
e> abort: pretxnchangegroup.fail hook exited with status 1\n
-
- testing ssh2
- creating ssh peer from handshake results
- i> write(171) -> 171:
- i> upgrade * proto=exp-ssh-v2-0003\n (glob)
- i> hello\n
- i> between\n
- i> pairs 81\n
- i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
- i> flush() -> None
- o> readline() -> 62:
- o> upgraded * exp-ssh-v2-0003\n (glob)
- o> readline() -> 4:
- o> 443\n
- o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
- o> read(1) -> 1:
- o> \n
- sending unbundle command
- i> write(9) -> 9:
- i> unbundle\n
- i> write(9) -> 9:
- i> heads 10\n
- i> write(10) -> 10: 666f726365
- i> flush() -> None
- o> readline() -> 2:
- o> 0\n
- i> write(4) -> 4:
- i> 426\n
- i> write(426) -> 426:
- i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n
- i> test\n
- i> 0 0\n
- i> foo\n
- i> \n
- i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n
- i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n
- i> \x00\x00\x00\x00\x00\x00\x00\x00
- i> write(2) -> 2:
- i> 0\n
- i> flush() -> None
- o> readline() -> 2:
- o> 0\n
- o> readline() -> 2:
- o> 1\n
- o> read(1) -> 1: 0
- result: 0
- remote output:
- e> read(-1) -> 167:
- e> adding changesets\n
- e> adding manifests\n
- e> adding file changes\n
- e> stderr 1\n
- e> stderr 2\n
- e> transaction abort!\n
- e> rollback completed\n
- e> abort: pretxnchangegroup.fail hook exited with status 1\n
-
Shell hook writing to stdout and stderr has output captured
$ cat > $TESTTMP/hook.sh << EOF
@@ -1518,9 +891,9 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 444\n
- o> readline() -> 444:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> \d+\\n (re)
+ o> readline\(\) -> \d+: (re)
+ o> capabilities: batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\\n (re)
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -1566,65 +939,6 @@
e> transaction abort!\n
e> rollback completed\n
e> abort: pretxnchangegroup.fail hook exited with status 1\n
-
- testing ssh2
- creating ssh peer from handshake results
- i> write(171) -> 171:
- i> upgrade * proto=exp-ssh-v2-0003\n (glob)
- i> hello\n
- i> between\n
- i> pairs 81\n
- i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
- i> flush() -> None
- o> readline() -> 62:
- o> upgraded * exp-ssh-v2-0003\n (glob)
- o> readline() -> 4:
- o> 443\n
- o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
- o> read(1) -> 1:
- o> \n
- sending unbundle command
- i> write(9) -> 9:
- i> unbundle\n
- i> write(9) -> 9:
- i> heads 10\n
- i> write(10) -> 10: 666f726365
- i> flush() -> None
- o> readline() -> 2:
- o> 0\n
- i> write(4) -> 4:
- i> 426\n
- i> write(426) -> 426:
- i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n
- i> test\n
- i> 0 0\n
- i> foo\n
- i> \n
- i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n
- i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n
- i> \x00\x00\x00\x00\x00\x00\x00\x00
- i> write(2) -> 2:
- i> 0\n
- i> flush() -> None
- o> readline() -> 2:
- o> 0\n
- o> readline() -> 2:
- o> 1\n
- o> read(1) -> 1: 0
- result: 0
- remote output:
- e> read(-1) -> 185:
- e> adding changesets\n
- e> adding manifests\n
- e> adding file changes\n
- e> stdout 1\n
- e> stderr 1\n
- e> stdout 2\n
- e> stderr 2\n
- e> transaction abort!\n
- e> rollback completed\n
- e> abort: pretxnchangegroup.fail hook exited with status 1\n
-
Shell and Python hooks writing to stdout and stderr have output captured
$ cat > $TESTTMP/hook.sh << EOF
@@ -1657,9 +971,9 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 444\n
- o> readline() -> 444:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> \d+\\n (re)
+ o> readline\(\) -> \d+: (re)
+ o> capabilities: batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\\n (re)
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -1709,69 +1023,6 @@
e> transaction abort!\n
e> rollback completed\n
e> abort: pretxnchangegroup.b hook failed\n
-
- testing ssh2
- creating ssh peer from handshake results
- i> write(171) -> 171:
- i> upgrade * proto=exp-ssh-v2-0003\n (glob)
- i> hello\n
- i> between\n
- i> pairs 81\n
- i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
- i> flush() -> None
- o> readline() -> 62:
- o> upgraded * exp-ssh-v2-0003\n (glob)
- o> readline() -> 4:
- o> 443\n
- o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
- o> read(1) -> 1:
- o> \n
- sending unbundle command
- i> write(9) -> 9:
- i> unbundle\n
- i> write(9) -> 9:
- i> heads 10\n
- i> write(10) -> 10: 666f726365
- i> flush() -> None
- o> readline() -> 2:
- o> 0\n
- i> write(4) -> 4:
- i> 426\n
- i> write(426) -> 426:
- i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n
- i> test\n
- i> 0 0\n
- i> foo\n
- i> \n
- i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n
- i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n
- i> \x00\x00\x00\x00\x00\x00\x00\x00
- i> write(2) -> 2:
- i> 0\n
- i> flush() -> None
- o> readline() -> 2:
- o> 0\n
- o> readline() -> 2:
- o> 1\n
- o> read(1) -> 1: 0
- result: 0
- remote output:
- e> read(-1) -> 228:
- e> adding changesets\n
- e> adding manifests\n
- e> adding file changes\n
- e> shell stdout 1\n
- e> shell stderr 1\n
- e> shell stdout 2\n
- e> shell stderr 2\n
- e> stdout 1\n
- e> stderr 1\n
- e> stdout 2\n
- e> stderr 2\n
- e> transaction abort!\n
- e> rollback completed\n
- e> abort: pretxnchangegroup.b hook failed\n
-
$ cd ..
Pushing a bundle1 with no output
@@ -1795,9 +1046,9 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 444\n
- o> readline() -> 444:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> \d+\\n (re)
+ o> readline\(\) -> \d+: (re)
+ o> capabilities: batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\\n (re)
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -1837,59 +1088,6 @@
e> adding manifests\n
e> adding file changes\n
e> added 1 changesets with 1 changes to 1 files\n
-
- testing ssh2
- creating ssh peer from handshake results
- i> write(171) -> 171:
- i> upgrade * proto=exp-ssh-v2-0003\n (glob)
- i> hello\n
- i> between\n
- i> pairs 81\n
- i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
- i> flush() -> None
- o> readline() -> 62:
- o> upgraded * exp-ssh-v2-0003\n (glob)
- o> readline() -> 4:
- o> 443\n
- o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
- o> read(1) -> 1:
- o> \n
- sending unbundle command
- i> write(9) -> 9:
- i> unbundle\n
- i> write(9) -> 9:
- i> heads 10\n
- i> write(10) -> 10: 666f726365
- i> flush() -> None
- o> readline() -> 2:
- o> 0\n
- i> write(4) -> 4:
- i> 426\n
- i> write(426) -> 426:
- i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n
- i> test\n
- i> 0 0\n
- i> foo\n
- i> \n
- i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n
- i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n
- i> \x00\x00\x00\x00\x00\x00\x00\x00
- i> write(2) -> 2:
- i> 0\n
- i> flush() -> None
- o> readline() -> 2:
- o> 0\n
- o> readline() -> 2:
- o> 1\n
- o> read(1) -> 1: 1
- result: 1
- remote output:
- e> read(-1) -> 100:
- e> adding changesets\n
- e> adding manifests\n
- e> adding file changes\n
- e> added 1 changesets with 1 changes to 1 files\n
-
$ cd ..
Pushing a bundle1 with ui.write() and ui.write_err()
@@ -1925,9 +1123,9 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 444\n
- o> readline() -> 444:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> \d+\\n (re)
+ o> readline\(\) -> \d+: (re)
+ o> capabilities: batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\\n (re)
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -1971,59 +1169,3 @@
e> ui.write 2\n
e> ui.write_err 2\n
e> added 1 changesets with 1 changes to 1 files\n
-
- testing ssh2
- creating ssh peer from handshake results
- i> write(171) -> 171:
- i> upgrade * proto=exp-ssh-v2-0003\n (glob)
- i> hello\n
- i> between\n
- i> pairs 81\n
- i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
- i> flush() -> None
- o> readline() -> 62:
- o> upgraded * exp-ssh-v2-0003\n (glob)
- o> readline() -> 4:
- o> 443\n
- o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
- o> read(1) -> 1:
- o> \n
- sending unbundle command
- i> write(9) -> 9:
- i> unbundle\n
- i> write(9) -> 9:
- i> heads 10\n
- i> write(10) -> 10: 666f726365
- i> flush() -> None
- o> readline() -> 2:
- o> 0\n
- i> write(4) -> 4:
- i> 426\n
- i> write(426) -> 426:
- i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n
- i> test\n
- i> 0 0\n
- i> foo\n
- i> \n
- i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n
- i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n
- i> \x00\x00\x00\x00\x00\x00\x00\x00
- i> write(2) -> 2:
- i> 0\n
- i> flush() -> None
- o> readline() -> 2:
- o> 0\n
- o> readline() -> 2:
- o> 1\n
- o> read(1) -> 1: 1
- result: 1
- remote output:
- e> read(-1) -> 152:
- e> adding changesets\n
- e> adding manifests\n
- e> adding file changes\n
- e> ui.write 1\n
- e> ui.write_err 1\n
- e> ui.write 2\n
- e> ui.write_err 2\n
- e> added 1 changesets with 1 changes to 1 files\n
--- a/tests/test-ssh-proto.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-ssh-proto.t Fri Feb 18 14:27:43 2022 +0100
@@ -7,13 +7,6 @@
> use-persistent-nodemap = no
> EOF
- $ cat > hgrc-sshv2 << EOF
- > %include $HGRCPATH
- > [experimental]
- > sshpeer.advertise-v2 = true
- > sshserver.support-v2 = true
- > EOF
-
Helper function to run protocol tests against multiple protocol versions.
This is easier than using #testcases because managing differences between
protocols with inline conditional output is hard to read.
@@ -22,9 +15,6 @@
> commands=`cat -`
> echo 'testing ssh1'
> echo "${commands}" | hg --verbose debugwireproto --localssh
- > echo ""
- > echo 'testing ssh2'
- > echo "${commands}" | HGRCPATH=$TESTTMP/hgrc-sshv2 hg --verbose debugwireproto --localssh
> }
$ cat >> $HGRCPATH << EOF
@@ -54,9 +44,6 @@
$ hg debugwireproto --localssh --peer ssh1 << EOF
> EOF
creating ssh peer for wire protocol version 1
- $ hg debugwireproto --localssh --peer ssh2 << EOF
- > EOF
- creating ssh peer for wire protocol version 2
Test a normal behaving server, for sanity
@@ -68,8 +55,8 @@
devel-peer-request: pairs: 81 bytes
sending hello command
sending between command
- remote: 444
- remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ remote: \d+ (re)
+ remote: capabilities: batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (re)
remote: 1
devel-peer-request: protocaps
devel-peer-request: caps: * bytes (glob)
@@ -90,9 +77,9 @@
i> write(6) -> 6:
i> hello\n
o> readline() -> 4:
- o> 444\n
- o> readline() -> 444:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> \d+\\n (re)
+ o> readline\(\) -> \d+: (re)
+ o> capabilities: batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\\n (re)
`hg debugserve --sshstdio` works
@@ -100,8 +87,8 @@
$ hg debugserve --sshstdio << EOF
> hello
> EOF
- 444
- capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ \d+ (re)
+ capabilities: batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (re)
I/O logging works
@@ -110,25 +97,25 @@
> EOF
e> flush() -> None
o> write(4) -> 4:
- o> 444\n
- o> write(444) -> 444:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
- 444
- capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> \d+\\n (re)
+ o> write\(\d+\) -> \d+: (re)
+ o> capabilities: batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\\n (re)
+ \d+ (re)
+ capabilities: batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (re)
o> flush() -> None
$ hg debugserve --sshstdio --logiofile $TESTTMP/io << EOF
> hello
> EOF
- 444
- capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ \d+ (re)
+ capabilities: batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (re)
$ cat $TESTTMP/io
e> flush() -> None
o> write(4) -> 4:
- o> 444\n
- o> write(444) -> 444:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> \d+\\n (re)
+ o> write\(\d+\) -> \d+: (re)
+ o> capabilities: batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\\n (re)
o> flush() -> None
$ cd ..
@@ -153,9 +140,9 @@
i> write(6) -> 6:
i> hello\n
o> readline() -> 4:
- o> 444\n
- o> readline() -> 444:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> \d+\\n (re)
+ o> readline\(\) -> \d+: (re)
+ o> capabilities: batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\\n (re)
i> write(98) -> 98:
i> between\n
i> pairs 81\n
@@ -190,8 +177,8 @@
remote: banner: line 7
remote: banner: line 8
remote: banner: line 9
- remote: 444
- remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ remote: \d+ (re)
+ remote: capabilities: batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (re)
remote: 1
devel-peer-request: protocaps
devel-peer-request: caps: * bytes (glob)
@@ -248,9 +235,9 @@
o> readline() -> 15:
o> banner: line 9\n
o> readline() -> 4:
- o> 444\n
- o> readline() -> 444:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> \d+\\n (re)
+ o> readline\(\) -> \d+: (re)
+ o> capabilities: batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\\n (re)
i> write(98) -> 98:
i> between\n
i> pairs 81\n
@@ -299,13 +286,13 @@
i> write(6) -> 6:
i> hello\n
o> readline() -> 4:
- o> 444\n
+ o> \d+\\n (re)
i> write(98) -> 98:
i> between\n
i> pairs 81\n
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
- o> readline() -> 444:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> readline\(\) -> \d+: (re)
+ o> capabilities: batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\\n (re)
o> readline() -> 2:
o> 1\n
@@ -317,8 +304,8 @@
sending hello command
sending between command
remote: 0
- remote: 444
- remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ remote: \d+ (re)
+ remote: capabilities: batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (re)
remote: 1
devel-peer-request: protocaps
devel-peer-request: caps: * bytes (glob)
@@ -366,9 +353,9 @@
i> write(6) -> 6:
i> hello\n
o> readline() -> 4:
- o> 444\n
- o> readline() -> 444:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> \d+\\n (re)
+ o> readline\(\) -> \d+: (re)
+ o> capabilities: batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\\n (re)
i> write(98) -> 98:
i> between\n
i> pairs 81\n
@@ -390,8 +377,8 @@
remote: 0
remote: 0
remote: 0
- remote: 444
- remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ remote: \d+ (re)
+ remote: capabilities: batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (re)
remote: 1
devel-peer-request: protocaps
devel-peer-request: caps: * bytes (glob)
@@ -447,9 +434,9 @@
i> write(6) -> 6:
i> hello\n
o> readline() -> 4:
- o> 444\n
- o> readline() -> 444:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> \d+\\n (re)
+ o> readline\(\) -> \d+: (re)
+ o> capabilities: batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\\n (re)
i> write(98) -> 98:
i> between\n
i> pairs 81\n
@@ -494,9 +481,9 @@
i> write(6) -> 6:
i> hello\n
o> readline() -> 4:
- o> 444\n
- o> readline() -> 444:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> \d+\\n (re)
+ o> readline\(\) -> \d+: (re)
+ o> capabilities: batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\\n (re)
i> write(98) -> 98:
i> between\n
i> pairs 81\n
@@ -539,9 +526,9 @@
i> write(6) -> 6:
i> hello\n
o> readline() -> 4:
- o> 444\n
- o> readline() -> 444:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> \d+\\n (re)
+ o> readline\(\) -> \d+: (re)
+ o> capabilities: batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\\n (re)
i> write(98) -> 98:
i> between\n
i> pairs 81\n
@@ -609,9 +596,9 @@
i> write(6) -> 6:
i> hello\n
o> readline() -> 4:
- o> 444\n
- o> readline() -> 444:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> \d+\\n (re)
+ o> readline\(\) -> \d+: (re)
+ o> capabilities: batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\\n (re)
Incomplete dictionary send
@@ -691,9 +678,9 @@
i> write(6) -> 6:
i> hello\n
o> readline() -> 4:
- o> 444\n
- o> readline() -> 444:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> \d+\\n (re)
+ o> readline\(\) -> \d+: (re)
+ o> capabilities: batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\\n (re)
i> write(98) -> 98:
i> between\n
i> pairs 81\n
@@ -725,9 +712,9 @@
i> write(6) -> 6:
i> hello\n
o> readline() -> 4:
- o> 444\n
- o> readline() -> 444:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> \d+\\n (re)
+ o> readline\(\) -> \d+: (re)
+ o> capabilities: batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\\n (re)
i> write(98) -> 98:
i> between\n
i> pairs 81\n
@@ -768,9 +755,9 @@
i> write(6) -> 6:
i> hello\n
o> readline() -> 4:
- o> 444\n
- o> readline() -> 444:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> \d+\\n (re)
+ o> readline\(\) -> \d+: (re)
+ o> capabilities: batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\\n (re)
i> write(98) -> 98:
i> between\n
i> pairs 81\n
@@ -797,9 +784,9 @@
i> write(6) -> 6:
i> hello\n
o> readline() -> 4:
- o> 444\n
- o> readline() -> 444:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> \d+\\n (re)
+ o> readline\(\) -> \d+: (re)
+ o> capabilities: batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\\n (re)
i> write(105) -> 105:
i> between\n
i> pairs 81\n
@@ -838,9 +825,9 @@
i> pairs 81\n
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
o> readline() -> 4:
- o> 444\n
- o> readline() -> 444:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> \d+\\n (re)
+ o> readline\(\) -> \d+: (re)
+ o> capabilities: batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\\n (re)
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -887,9 +874,9 @@
o> readline() -> 41:
o> 68986213bd4485ea51533535e3fc9e78007a711f\n
o> readline() -> 4:
- o> 444\n
- o> readline() -> 444:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> \d+\\n (re)
+ o> readline\(\) -> \d+: (re)
+ o> capabilities: batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\\n (re)
o> readline() -> 2:
o> 1\n
@@ -914,411 +901,7 @@
o> readline() -> 41:
o> 68986213bd4485ea51533535e3fc9e78007a711f\n
o> readline() -> 4:
- o> 444\n
-
-Send an upgrade request to a server that doesn't support that command
-
- $ hg debugwireproto --localssh --peer raw << EOF
- > raw
- > upgrade 2e82ab3f-9ce3-4b4e-8f8c-6fd1c0e9e23a proto=irrelevant1%2Cirrelevant2\n
- > readline
- > raw
- > hello\n
- > between\n
- > pairs 81\n
- > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
- > readline
- > readline
- > readline
- > readline
- > EOF
- using raw connection to peer
- i> write(77) -> 77:
- i> upgrade 2e82ab3f-9ce3-4b4e-8f8c-6fd1c0e9e23a proto=irrelevant1%2Cirrelevant2\n
- o> readline() -> 2:
- o> 0\n
- i> write(104) -> 104:
- i> hello\n
- i> between\n
- i> pairs 81\n
- i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
- o> readline() -> 4:
- o> 444\n
- o> readline() -> 444:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
- o> readline() -> 2:
- o> 1\n
- o> readline() -> 1:
- o> \n
-
- $ cd ..
-
- $ hg --config experimental.sshpeer.advertise-v2=true --debug debugpeer ssh://user@dummy/server
- running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R server serve --stdio['"] (re)
- sending upgrade request: * proto=exp-ssh-v2-0003 (glob)
- devel-peer-request: hello+between
- devel-peer-request: pairs: 81 bytes
- sending hello command
- sending between command
- remote: 0
- remote: 444
- remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
- remote: 1
- devel-peer-request: protocaps
- devel-peer-request: caps: * bytes (glob)
- sending protocaps command
- url: ssh://user@dummy/server
- local: no
- pushable: yes
-
-Enable version 2 support on server. We need to do this in hgrc because we can't
-use --config with `hg serve --stdio`.
-
- $ cat >> server/.hg/hgrc << EOF
- > [experimental]
- > sshserver.support-v2 = true
- > EOF
-
-Send an upgrade request to a server that supports upgrade
-
- $ cd server
-
- $ hg debugwireproto --localssh --peer raw << EOF
- > raw
- > upgrade this-is-some-token proto=exp-ssh-v2-0003\n
- > hello\n
- > between\n
- > pairs 81\n
- > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
- > readline
- > readline
- > readline
- > EOF
- using raw connection to peer
- i> write(153) -> 153:
- i> upgrade this-is-some-token proto=exp-ssh-v2-0003\n
- i> hello\n
- i> between\n
- i> pairs 81\n
- i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
- o> readline() -> 44:
- o> upgraded this-is-some-token exp-ssh-v2-0003\n
- o> readline() -> 4:
- o> 443\n
- o> readline() -> 444:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
-
- $ cd ..
-
- $ hg --config experimental.sshpeer.advertise-v2=true --debug debugpeer ssh://user@dummy/server
- running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R server serve --stdio['"] (re)
- sending upgrade request: * proto=exp-ssh-v2-0003 (glob)
- devel-peer-request: hello+between
- devel-peer-request: pairs: 81 bytes
- sending hello command
- sending between command
- protocol upgraded to exp-ssh-v2-0003
- remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
- devel-peer-request: protocaps
- devel-peer-request: caps: * bytes (glob)
- sending protocaps command
- url: ssh://user@dummy/server
- local: no
- pushable: yes
-
-Verify the peer has capabilities
-
- $ hg --config experimental.sshpeer.advertise-v2=true --debug debugcapabilities ssh://user@dummy/server
- running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R server serve --stdio['"] (re)
- sending upgrade request: * proto=exp-ssh-v2-0003 (glob)
- devel-peer-request: hello+between
- devel-peer-request: pairs: 81 bytes
- sending hello command
- sending between command
- protocol upgraded to exp-ssh-v2-0003
- remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
- devel-peer-request: protocaps
- devel-peer-request: caps: * bytes (glob)
- sending protocaps command
- Main capabilities:
- batch
- branchmap
- $USUAL_BUNDLE2_CAPS$
- changegroupsubset
- getbundle
- known
- lookup
- protocaps
- pushkey
- streamreqs=generaldelta,revlogv1,sparserevlog
- unbundle=HG10GZ,HG10BZ,HG10UN
- unbundlehash
- Bundle2 capabilities:
- HG20
- bookmarks
- changegroup
- 01
- 02
- checkheads
- related
- digests
- md5
- sha1
- sha512
- error
- abort
- unsupportedcontent
- pushraced
- pushkey
- hgtagsfnodes
- listkeys
- phases
- heads
- pushkey
- remote-changegroup
- http
- https
- stream
- v2
-
-Command after upgrade to version 2 is processed
-
- $ cd server
-
- $ hg debugwireproto --localssh --peer raw << EOF
- > raw
- > upgrade this-is-some-token proto=exp-ssh-v2-0003\n
- > hello\n
- > between\n
- > pairs 81\n
- > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
- > readline
- > readline
- > readline
- > raw
- > hello\n
- > readline
- > readline
- > EOF
- using raw connection to peer
- i> write(153) -> 153:
- i> upgrade this-is-some-token proto=exp-ssh-v2-0003\n
- i> hello\n
- i> between\n
- i> pairs 81\n
- i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
- o> readline() -> 44:
- o> upgraded this-is-some-token exp-ssh-v2-0003\n
- o> readline() -> 4:
- o> 443\n
- o> readline() -> 444:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
- i> write(6) -> 6:
- i> hello\n
- o> readline() -> 4:
- o> 428\n
- o> readline() -> 428:
- o> capabilities: branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
-
-Multiple upgrades is not allowed
-
- $ hg debugwireproto --localssh --peer raw << EOF
- > raw
- > upgrade this-is-some-token proto=exp-ssh-v2-0003\n
- > hello\n
- > between\n
- > pairs 81\n
- > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
- > readline
- > readline
- > readline
- > raw
- > upgrade another-token proto=irrelevant\n
- > hello\n
- > readline
- > readavailable
- > EOF
- using raw connection to peer
- i> write(153) -> 153:
- i> upgrade this-is-some-token proto=exp-ssh-v2-0003\n
- i> hello\n
- i> between\n
- i> pairs 81\n
- i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
- o> readline() -> 44:
- o> upgraded this-is-some-token exp-ssh-v2-0003\n
- o> readline() -> 4:
- o> 443\n
- o> readline() -> 444:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
- i> write(45) -> 45:
- i> upgrade another-token proto=irrelevant\n
- i> hello\n
- o> readline() -> 1:
- o> \n
- e> read(-1) -> 42:
- e> cannot upgrade protocols multiple times\n
- e> -\n
-
-Malformed upgrade request line (not exactly 3 space delimited tokens)
-
- $ hg debugwireproto --localssh --peer raw << EOF
- > raw
- > upgrade\n
- > readline
- > EOF
- using raw connection to peer
- i> write(8) -> 8:
- i> upgrade\n
- o> readline() -> 2:
- o> 0\n
-
- $ hg debugwireproto --localssh --peer raw << EOF
- > raw
- > upgrade token\n
- > readline
- > EOF
- using raw connection to peer
- i> write(14) -> 14:
- i> upgrade token\n
- o> readline() -> 2:
- o> 0\n
-
- $ hg debugwireproto --localssh --peer raw << EOF
- > raw
- > upgrade token foo=bar extra-token\n
- > readline
- > EOF
- using raw connection to peer
- i> write(34) -> 34:
- i> upgrade token foo=bar extra-token\n
- o> readline() -> 2:
- o> 0\n
-
-Upgrade request to unsupported protocol is ignored
-
- $ hg debugwireproto --localssh --peer raw << EOF
- > raw
- > upgrade this-is-some-token proto=unknown1,unknown2\n
- > readline
- > raw
- > hello\n
- > readline
- > readline
- > raw
- > between\n
- > pairs 81\n
- > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
- > readline
- > readline
- > EOF
- using raw connection to peer
- i> write(51) -> 51:
- i> upgrade this-is-some-token proto=unknown1,unknown2\n
- o> readline() -> 2:
- o> 0\n
- i> write(6) -> 6:
- i> hello\n
- o> readline() -> 4:
- o> 444\n
- o> readline() -> 444:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
- i> write(98) -> 98:
- i> between\n
- i> pairs 81\n
- i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
- o> readline() -> 2:
- o> 1\n
- o> readline() -> 1:
- o> \n
-
-Upgrade request must be followed by hello + between
-
- $ hg debugwireproto --localssh --peer raw << EOF
- > raw
- > upgrade token proto=exp-ssh-v2-0003\n
- > invalid\n
- > readline
- > readavailable
- > EOF
- using raw connection to peer
- i> write(44) -> 44:
- i> upgrade token proto=exp-ssh-v2-0003\n
- i> invalid\n
- o> readline() -> 1:
- o> \n
- e> read(-1) -> 46:
- e> malformed handshake protocol: missing hello\n
- e> -\n
-
- $ hg debugwireproto --localssh --peer raw << EOF
- > raw
- > upgrade token proto=exp-ssh-v2-0003\n
- > hello\n
- > invalid\n
- > readline
- > readavailable
- > EOF
- using raw connection to peer
- i> write(50) -> 50:
- i> upgrade token proto=exp-ssh-v2-0003\n
- i> hello\n
- i> invalid\n
- o> readline() -> 1:
- o> \n
- e> read(-1) -> 48:
- e> malformed handshake protocol: missing between\n
- e> -\n
-
- $ hg debugwireproto --localssh --peer raw << EOF
- > raw
- > upgrade token proto=exp-ssh-v2-0003\n
- > hello\n
- > between\n
- > invalid\n
- > readline
- > readavailable
- > EOF
- using raw connection to peer
- i> write(58) -> 58:
- i> upgrade token proto=exp-ssh-v2-0003\n
- i> hello\n
- i> between\n
- i> invalid\n
- o> readline() -> 1:
- o> \n
- e> read(-1) -> 49:
- e> malformed handshake protocol: missing pairs 81\n
- e> -\n
-
-Legacy commands are not exposed to version 2 of protocol
-
-TODO re-enable these once we're back to actually using v2 commands
-
-$ hg --config experimental.sshpeer.advertise-v2=true debugwireproto --localssh << EOF
-> command branches
-> nodes 0000000000000000000000000000000000000000
-> EOF
-creating ssh peer from handshake results
-sending branches command
-response:
-
-$ hg --config experimental.sshpeer.advertise-v2=true debugwireproto --localssh << EOF
-> command changegroup
-> roots 0000000000000000000000000000000000000000
-> EOF
-creating ssh peer from handshake results
-sending changegroup command
-response:
-
-$ hg --config experimental.sshpeer.advertise-v2=true debugwireproto --localssh << EOF
-> command changegroupsubset
-> bases 0000000000000000000000000000000000000000
-> heads 0000000000000000000000000000000000000000
-> EOF
-creating ssh peer from handshake results
-sending changegroupsubset command
-response:
+ o> \d+\\n (re)
$ cd ..
@@ -1339,9 +922,9 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 444\n
- o> readline() -> 444:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> \d+\\n (re)
+ o> readline\(\) -> \d+: (re)
+ o> capabilities: batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\\n (re)
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -1364,41 +947,6 @@
b'namespaces': b'',
b'phases': b''
}
-
- testing ssh2
- creating ssh peer from handshake results
- i> write(171) -> 171:
- i> upgrade * proto=exp-ssh-v2-0003\n (glob)
- i> hello\n
- i> between\n
- i> pairs 81\n
- i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
- i> flush() -> None
- o> readline() -> 62:
- o> upgraded * exp-ssh-v2-0003\n (glob)
- o> readline() -> 4:
- o> 443\n
- o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
- o> read(1) -> 1:
- o> \n
- sending listkeys command
- i> write(9) -> 9:
- i> listkeys\n
- i> write(13) -> 13:
- i> namespace 10\n
- i> write(10) -> 10: namespaces
- i> flush() -> None
- o> bufferedreadline() -> 3:
- o> 30\n
- o> bufferedread(30) -> 30:
- o> bookmarks\t\n
- o> namespaces\t\n
- o> phases\t
- response: {
- b'bookmarks': b'',
- b'namespaces': b'',
- b'phases': b''
- }
$ cd ..
@@ -1427,9 +975,9 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 444\n
- o> readline() -> 444:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> \d+\\n (re)
+ o> readline\(\) -> \d+: (re)
+ o> capabilities: batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\\n (re)
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -1444,33 +992,6 @@
o> bufferedreadline() -> 2:
o> 0\n
response: {}
-
- testing ssh2
- creating ssh peer from handshake results
- i> write(171) -> 171:
- i> upgrade * proto=exp-ssh-v2-0003\n (glob)
- i> hello\n
- i> between\n
- i> pairs 81\n
- i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
- i> flush() -> None
- o> readline() -> 62:
- o> upgraded * exp-ssh-v2-0003\n (glob)
- o> readline() -> 4:
- o> 443\n
- o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
- o> read(1) -> 1:
- o> \n
- sending listkeys command
- i> write(9) -> 9:
- i> listkeys\n
- i> write(12) -> 12:
- i> namespace 9\n
- i> write(9) -> 9: bookmarks
- i> flush() -> None
- o> bufferedreadline() -> 2:
- o> 0\n
- response: {}
With a single bookmark set
@@ -1488,9 +1009,9 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 444\n
- o> readline() -> 444:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> \d+\\n (re)
+ o> readline\(\) -> \d+: (re)
+ o> capabilities: batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\\n (re)
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -1508,36 +1029,6 @@
response: {
b'bookA': b'68986213bd4485ea51533535e3fc9e78007a711f'
}
-
- testing ssh2
- creating ssh peer from handshake results
- i> write(171) -> 171:
- i> upgrade * proto=exp-ssh-v2-0003\n (glob)
- i> hello\n
- i> between\n
- i> pairs 81\n
- i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
- i> flush() -> None
- o> readline() -> 62:
- o> upgraded * exp-ssh-v2-0003\n (glob)
- o> readline() -> 4:
- o> 443\n
- o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
- o> read(1) -> 1:
- o> \n
- sending listkeys command
- i> write(9) -> 9:
- i> listkeys\n
- i> write(12) -> 12:
- i> namespace 9\n
- i> write(9) -> 9: bookmarks
- i> flush() -> None
- o> bufferedreadline() -> 3:
- o> 46\n
- o> bufferedread(46) -> 46: bookA\t68986213bd4485ea51533535e3fc9e78007a711f
- response: {
- b'bookA': b'68986213bd4485ea51533535e3fc9e78007a711f'
- }
With multiple bookmarks set
@@ -1555,9 +1046,9 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 444\n
- o> readline() -> 444:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> \d+\\n (re)
+ o> readline\(\) -> \d+: (re)
+ o> capabilities: batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\\n (re)
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -1578,39 +1069,6 @@
b'bookA': b'68986213bd4485ea51533535e3fc9e78007a711f',
b'bookB': b'1880f3755e2e52e3199e0ee5638128b08642f34d'
}
-
- testing ssh2
- creating ssh peer from handshake results
- i> write(171) -> 171:
- i> upgrade * proto=exp-ssh-v2-0003\n (glob)
- i> hello\n
- i> between\n
- i> pairs 81\n
- i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
- i> flush() -> None
- o> readline() -> 62:
- o> upgraded * exp-ssh-v2-0003\n (glob)
- o> readline() -> 4:
- o> 443\n
- o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
- o> read(1) -> 1:
- o> \n
- sending listkeys command
- i> write(9) -> 9:
- i> listkeys\n
- i> write(12) -> 12:
- i> namespace 9\n
- i> write(9) -> 9: bookmarks
- i> flush() -> None
- o> bufferedreadline() -> 3:
- o> 93\n
- o> bufferedread(93) -> 93:
- o> bookA\t68986213bd4485ea51533535e3fc9e78007a711f\n
- o> bookB\t1880f3755e2e52e3199e0ee5638128b08642f34d
- response: {
- b'bookA': b'68986213bd4485ea51533535e3fc9e78007a711f',
- b'bookB': b'1880f3755e2e52e3199e0ee5638128b08642f34d'
- }
Test pushkey for bookmarks
@@ -1630,9 +1088,9 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 444\n
- o> readline() -> 444:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> \d+\\n (re)
+ o> readline\(\) -> \d+: (re)
+ o> capabilities: batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\\n (re)
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -1657,43 +1115,6 @@
o> bufferedread(2) -> 2:
o> 1\n
response: True
-
- testing ssh2
- creating ssh peer from handshake results
- i> write(171) -> 171:
- i> upgrade * proto=exp-ssh-v2-0003\n (glob)
- i> hello\n
- i> between\n
- i> pairs 81\n
- i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
- i> flush() -> None
- o> readline() -> 62:
- o> upgraded * exp-ssh-v2-0003\n (glob)
- o> readline() -> 4:
- o> 443\n
- o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
- o> read(1) -> 1:
- o> \n
- sending pushkey command
- i> write(8) -> 8:
- i> pushkey\n
- i> write(6) -> 6:
- i> key 6\n
- i> write(6) -> 6: remote
- i> write(12) -> 12:
- i> namespace 9\n
- i> write(9) -> 9: bookmarks
- i> write(7) -> 7:
- i> new 40\n
- i> write(40) -> 40: 68986213bd4485ea51533535e3fc9e78007a711f
- i> write(6) -> 6:
- i> old 0\n
- i> flush() -> None
- o> bufferedreadline() -> 2:
- o> 2\n
- o> bufferedread(2) -> 2:
- o> 1\n
- response: True
$ hg bookmarks
bookA 0:68986213bd44
@@ -1722,9 +1143,9 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 444\n
- o> readline() -> 444:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> \d+\\n (re)
+ o> readline\(\) -> \d+: (re)
+ o> capabilities: batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\\n (re)
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -1742,36 +1163,6 @@
response: {
b'publishing': b'True'
}
-
- testing ssh2
- creating ssh peer from handshake results
- i> write(171) -> 171:
- i> upgrade * proto=exp-ssh-v2-0003\n (glob)
- i> hello\n
- i> between\n
- i> pairs 81\n
- i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
- i> flush() -> None
- o> readline() -> 62:
- o> upgraded * exp-ssh-v2-0003\n (glob)
- o> readline() -> 4:
- o> 443\n
- o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
- o> read(1) -> 1:
- o> \n
- sending listkeys command
- i> write(9) -> 9:
- i> listkeys\n
- i> write(12) -> 12:
- i> namespace 6\n
- i> write(6) -> 6: phases
- i> flush() -> None
- o> bufferedreadline() -> 3:
- o> 15\n
- o> bufferedread(15) -> 15: publishing\tTrue
- response: {
- b'publishing': b'True'
- }
Create some commits
@@ -1805,9 +1196,9 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 444\n
- o> readline() -> 444:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> \d+\\n (re)
+ o> readline\(\) -> \d+: (re)
+ o> capabilities: batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\\n (re)
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -1830,41 +1221,6 @@
b'c4750011d906c18ea2f0527419cbc1a544435150': b'1',
b'publishing': b'True'
}
-
- testing ssh2
- creating ssh peer from handshake results
- i> write(171) -> 171:
- i> upgrade * proto=exp-ssh-v2-0003\n (glob)
- i> hello\n
- i> between\n
- i> pairs 81\n
- i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
- i> flush() -> None
- o> readline() -> 62:
- o> upgraded * exp-ssh-v2-0003\n (glob)
- o> readline() -> 4:
- o> 443\n
- o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
- o> read(1) -> 1:
- o> \n
- sending listkeys command
- i> write(9) -> 9:
- i> listkeys\n
- i> write(12) -> 12:
- i> namespace 6\n
- i> write(6) -> 6: phases
- i> flush() -> None
- o> bufferedreadline() -> 4:
- o> 101\n
- o> bufferedread(101) -> 101:
- o> 20b8a89289d80036e6c4e87c2083e3bea1586637\t1\n
- o> c4750011d906c18ea2f0527419cbc1a544435150\t1\n
- o> publishing\tTrue
- response: {
- b'20b8a89289d80036e6c4e87c2083e3bea1586637': b'1',
- b'c4750011d906c18ea2f0527419cbc1a544435150': b'1',
- b'publishing': b'True'
- }
Single draft head
@@ -1882,9 +1238,9 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 444\n
- o> readline() -> 444:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> \d+\\n (re)
+ o> readline\(\) -> \d+: (re)
+ o> capabilities: batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\\n (re)
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -1905,39 +1261,6 @@
b'c4750011d906c18ea2f0527419cbc1a544435150': b'1',
b'publishing': b'True'
}
-
- testing ssh2
- creating ssh peer from handshake results
- i> write(171) -> 171:
- i> upgrade * proto=exp-ssh-v2-0003\n (glob)
- i> hello\n
- i> between\n
- i> pairs 81\n
- i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
- i> flush() -> None
- o> readline() -> 62:
- o> upgraded * exp-ssh-v2-0003\n (glob)
- o> readline() -> 4:
- o> 443\n
- o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
- o> read(1) -> 1:
- o> \n
- sending listkeys command
- i> write(9) -> 9:
- i> listkeys\n
- i> write(12) -> 12:
- i> namespace 6\n
- i> write(6) -> 6: phases
- i> flush() -> None
- o> bufferedreadline() -> 3:
- o> 58\n
- o> bufferedread(58) -> 58:
- o> c4750011d906c18ea2f0527419cbc1a544435150\t1\n
- o> publishing\tTrue
- response: {
- b'c4750011d906c18ea2f0527419cbc1a544435150': b'1',
- b'publishing': b'True'
- }
All public heads
@@ -1955,9 +1278,9 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 444\n
- o> readline() -> 444:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> \d+\\n (re)
+ o> readline\(\) -> \d+: (re)
+ o> capabilities: batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\\n (re)
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -1975,36 +1298,6 @@
response: {
b'publishing': b'True'
}
-
- testing ssh2
- creating ssh peer from handshake results
- i> write(171) -> 171:
- i> upgrade * proto=exp-ssh-v2-0003\n (glob)
- i> hello\n
- i> between\n
- i> pairs 81\n
- i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
- i> flush() -> None
- o> readline() -> 62:
- o> upgraded * exp-ssh-v2-0003\n (glob)
- o> readline() -> 4:
- o> 443\n
- o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
- o> read(1) -> 1:
- o> \n
- sending listkeys command
- i> write(9) -> 9:
- i> listkeys\n
- i> write(12) -> 12:
- i> namespace 6\n
- i> write(6) -> 6: phases
- i> flush() -> None
- o> bufferedreadline() -> 3:
- o> 15\n
- o> bufferedread(15) -> 15: publishing\tTrue
- response: {
- b'publishing': b'True'
- }
Setting public phase via pushkey
@@ -2026,9 +1319,9 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 444\n
- o> readline() -> 444:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> \d+\\n (re)
+ o> readline\(\) -> \d+: (re)
+ o> capabilities: batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\\n (re)
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -2054,44 +1347,6 @@
o> bufferedread(2) -> 2:
o> 1\n
response: True
-
- testing ssh2
- creating ssh peer from handshake results
- i> write(171) -> 171:
- i> upgrade * proto=exp-ssh-v2-0003\n (glob)
- i> hello\n
- i> between\n
- i> pairs 81\n
- i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
- i> flush() -> None
- o> readline() -> 62:
- o> upgraded * exp-ssh-v2-0003\n (glob)
- o> readline() -> 4:
- o> 443\n
- o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
- o> read(1) -> 1:
- o> \n
- sending pushkey command
- i> write(8) -> 8:
- i> pushkey\n
- i> write(7) -> 7:
- i> key 40\n
- i> write(40) -> 40: 7127240a084fd9dc86fe8d1f98e26229161ec82b
- i> write(12) -> 12:
- i> namespace 6\n
- i> write(6) -> 6: phases
- i> write(6) -> 6:
- i> new 1\n
- i> write(1) -> 1: 0
- i> write(6) -> 6:
- i> old 1\n
- i> write(1) -> 1: 1
- i> flush() -> None
- o> bufferedreadline() -> 2:
- o> 2\n
- o> bufferedread(2) -> 2:
- o> 1\n
- response: True
$ hg phase .
4: public
@@ -2133,9 +1388,9 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 444\n
- o> readline() -> 444:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> \d+\\n (re)
+ o> readline\(\) -> \d+: (re)
+ o> capabilities: batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\\n (re)
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -2160,40 +1415,3 @@
response #0: bfebe6bd38eebc6f8202e419c1171268987ea6a6 4ee3fcef1c800fa2bf23e20af7c83ff111d9c7ab\n
response #1: bookA\t4ee3fcef1c800fa2bf23e20af7c83ff111d9c7ab\nbookB\tbfebe6bd38eebc6f8202e419c1171268987ea6a6
response #2: 4ee3fcef1c800fa2bf23e20af7c83ff111d9c7ab\t1\nbfebe6bd38eebc6f8202e419c1171268987ea6a6\t1\npublishing\tTrue
-
- testing ssh2
- creating ssh peer from handshake results
- i> write(171) -> 171:
- i> upgrade * proto=exp-ssh-v2-0003\n (glob)
- i> hello\n
- i> between\n
- i> pairs 81\n
- i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
- i> flush() -> None
- o> readline() -> 62:
- o> upgraded * exp-ssh-v2-0003\n (glob)
- o> readline() -> 4:
- o> 443\n
- o> read(443) -> 443: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
- o> read(1) -> 1:
- o> \n
- sending batch with 3 sub-commands
- i> write(6) -> 6:
- i> batch\n
- i> write(4) -> 4:
- i> * 0\n
- i> write(8) -> 8:
- i> cmds 61\n
- i> write(61) -> 61: heads ;listkeys namespace=bookmarks;listkeys namespace=phases
- i> flush() -> None
- o> bufferedreadline() -> 4:
- o> 278\n
- o> bufferedread(278) -> 278:
- o> bfebe6bd38eebc6f8202e419c1171268987ea6a6 4ee3fcef1c800fa2bf23e20af7c83ff111d9c7ab\n
- o> ;bookA\t4ee3fcef1c800fa2bf23e20af7c83ff111d9c7ab\n
- o> bookB\tbfebe6bd38eebc6f8202e419c1171268987ea6a6;4ee3fcef1c800fa2bf23e20af7c83ff111d9c7ab\t1\n
- o> bfebe6bd38eebc6f8202e419c1171268987ea6a6\t1\n
- o> publishing\tTrue
- response #0: bfebe6bd38eebc6f8202e419c1171268987ea6a6 4ee3fcef1c800fa2bf23e20af7c83ff111d9c7ab\n
- response #1: bookA\t4ee3fcef1c800fa2bf23e20af7c83ff111d9c7ab\nbookB\tbfebe6bd38eebc6f8202e419c1171268987ea6a6
- response #2: 4ee3fcef1c800fa2bf23e20af7c83ff111d9c7ab\t1\nbfebe6bd38eebc6f8202e419c1171268987ea6a6\t1\npublishing\tTrue
--- a/tests/test-ssh.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-ssh.t Fri Feb 18 14:27:43 2022 +0100
@@ -1,13 +1,3 @@
-#testcases sshv1 sshv2
-
-#if sshv2
- $ cat >> $HGRCPATH << EOF
- > [experimental]
- > sshpeer.advertise-v2 = true
- > sshserver.support-v2 = true
- > EOF
-#endif
-
This test tries to exercise the ssh functionality with a dummy script
creating 'remote' repo
@@ -537,17 +527,13 @@
$ hg pull --debug ssh://user@dummy/remote --config devel.debug.peer-request=yes
pulling from ssh://user@dummy/remote
running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R remote serve --stdio['"] (re)
- sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
devel-peer-request: hello+between
devel-peer-request: pairs: 81 bytes
sending hello command
sending between command
- remote: 444 (sshv1 no-rust !)
- remote: 463 (sshv1 rust !)
- protocol upgraded to exp-ssh-v2-0003 (sshv2 !)
- remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (no-rust !)
- remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,persistent-nodemap,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (rust !)
- remote: 1 (sshv1 !)
+ remote: \d+ (re)
+ remote: capabilities: batch branchmap \$USUAL_BUNDLE2_CAPS\$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=[^ ,]+(,[^ ,]+)* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (re)
+ remote: 1
devel-peer-request: protocaps
devel-peer-request: caps: * bytes (glob)
sending protocaps command
--- a/tests/test-static-http.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-static-http.t Fri Feb 18 14:27:43 2022 +0100
@@ -95,7 +95,7 @@
$ cd ..
$ hg clone -r doesnotexist static-http://localhost:$HGPORT/remote local0
abort: unknown revision 'doesnotexist'
- [255]
+ [10]
$ hg clone -r 0 static-http://localhost:$HGPORT/remote local0
adding changesets
adding manifests
@@ -240,6 +240,7 @@
/.hg/store/data/a.i
/.hg/store/data/~2ehgsub.i (py37 !)
/.hg/store/data/~2ehgsubstate.i (py37 !)
+ /.hg/store/requires
/notarepo/.hg/00changelog.i
/notarepo/.hg/requires
/remote-with-names/.hg/bookmarks
@@ -255,6 +256,8 @@
/remote-with-names/.hg/store/data/%7E2ehgtags.i (no-py37 !)
/remote-with-names/.hg/store/data/foo.i
/remote-with-names/.hg/store/data/~2ehgtags.i (py37 !)
+ /remote-with-names/.hg/store/obsstore
+ /remote-with-names/.hg/store/requires
/remote/.hg/bookmarks
/remote/.hg/bookmarks.current
/remote/.hg/cache/branch2-base
@@ -274,12 +277,15 @@
/remote/.hg/store/data/quux.i
/remote/.hg/store/data/~2edotfile%20with%20spaces.i (py37 !)
/remote/.hg/store/data/~2ehgtags.i (py37 !)
+ /remote/.hg/store/obsstore
+ /remote/.hg/store/requires
/remotempty/.hg/bookmarks
/remotempty/.hg/bookmarks.current
/remotempty/.hg/dirstate
/remotempty/.hg/requires
/remotempty/.hg/store/00changelog.i
/remotempty/.hg/store/00manifest.i
+ /remotempty/.hg/store/requires
/sub/.hg/bookmarks
/sub/.hg/bookmarks.current
/sub/.hg/cache/hgtagsfnodes1
@@ -290,3 +296,4 @@
/sub/.hg/store/data/%7E2ehgtags.i (no-py37 !)
/sub/.hg/store/data/test.i
/sub/.hg/store/data/~2ehgtags.i (py37 !)
+ /sub/.hg/store/requires
--- a/tests/test-status-color.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-status-color.t Fri Feb 18 14:27:43 2022 +0100
@@ -311,8 +311,8 @@
$ hg --config color.status.modified=periwinkle status
ignoring unknown color/effect 'periwinkle' (configured in color.status.modified)
- ignoring unknown color/effect 'periwinkle' (configured in color.status.modified)
- ignoring unknown color/effect 'periwinkle' (configured in color.status.modified)
+ ignoring unknown color/effect 'periwinkle' (configured in color.status.modified) (no-rhg !)
+ ignoring unknown color/effect 'periwinkle' (configured in color.status.modified) (no-rhg !)
M modified
\x1b[0;32;1mA \x1b[0m\x1b[0;32;1madded\x1b[0m (esc)
\x1b[0;32;1mA \x1b[0m\x1b[0;32;1mcopied\x1b[0m (esc)
@@ -375,8 +375,8 @@
created new head
$ hg merge
merging a
+ warning: conflicts while merging a! (edit, then use 'hg resolve --mark')
merging b
- warning: conflicts while merging a! (edit, then use 'hg resolve --mark')
warning: conflicts while merging b! (edit, then use 'hg resolve --mark')
0 files updated, 0 files merged, 0 files removed, 2 files unresolved
use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-status-tracked-key.t Fri Feb 18 14:27:43 2022 +0100
@@ -0,0 +1,204 @@
+===============================
+Test the "tracked hint" feature
+===============================
+
+The tracked hint feature provide a file that get updated when the set of tracked
+files get updated.
+
+basic setup
+
+ $ cat << EOF >> $HGRCPATH
+ > [format]
+ > use-dirstate-tracked-hint=yes
+ > EOF
+
+ $ hg init tracked-hint-test
+ $ cd tracked-hint-test
+ $ hg debugbuilddag '.+10' -n
+ $ hg log -G -T '{rev} {desc} {files}\n'
+ o 10 r10 nf10
+ |
+ o 9 r9 nf9
+ |
+ o 8 r8 nf8
+ |
+ o 7 r7 nf7
+ |
+ o 6 r6 nf6
+ |
+ o 5 r5 nf5
+ |
+ o 4 r4 nf4
+ |
+ o 3 r3 nf3
+ |
+ o 2 r2 nf2
+ |
+ o 1 r1 nf1
+ |
+ o 0 r0 nf0
+
+ $ hg up tip
+ 11 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg files
+ nf0
+ nf1
+ nf10
+ nf2
+ nf3
+ nf4
+ nf5
+ nf6
+ nf7
+ nf8
+ nf9
+
+key-file exists
+-----------
+
+The tracked hint file should exist
+
+ $ ls -1 .hg/dirstate*
+ .hg/dirstate
+ .hg/dirstate-tracked-hint
+
+key-file stay the same if the tracked set is unchanged
+------------------------------------------------------
+
+(copy its content for later comparison)
+
+ $ cp .hg/dirstate-tracked-hint ../key-bck
+ $ echo foo >> nf0
+ $ sleep 1
+ $ hg status
+ M nf0
+ $ diff --brief .hg/dirstate-tracked-hint ../key-bck
+ $ hg revert -C nf0
+ $ sleep 1
+ $ hg status
+ $ diff --brief .hg/dirstate-tracked-hint ../key-bck
+
+key-file change if the tracked set is changed manually
+------------------------------------------------------
+
+adding a file to tracking
+
+ $ cp .hg/dirstate-tracked-hint ../key-bck
+ $ echo x > x
+ $ hg add x
+ $ diff --brief .hg/dirstate-tracked-hint ../key-bck
+ Files .hg/dirstate-tracked-hint and ../key-bck differ
+ [1]
+
+remove a file from tracking
+(forget)
+
+ $ cp .hg/dirstate-tracked-hint ../key-bck
+ $ hg forget x
+ $ diff --brief .hg/dirstate-tracked-hint ../key-bck
+ Files .hg/dirstate-tracked-hint and ../key-bck differ
+ [1]
+
+(remove)
+
+ $ cp .hg/dirstate-tracked-hint ../key-bck
+ $ hg remove nf1
+ $ diff --brief .hg/dirstate-tracked-hint ../key-bck
+ Files .hg/dirstate-tracked-hint and ../key-bck differ
+ [1]
+
+key-file changes on revert (when applicable)
+--------------------------------------------
+
+ $ cp .hg/dirstate-tracked-hint ../key-bck
+ $ hg status
+ R nf1
+ ? x
+ $ hg revert --all
+ undeleting nf1
+ $ hg status
+ ? x
+ $ diff --brief .hg/dirstate-tracked-hint ../key-bck
+ Files .hg/dirstate-tracked-hint and ../key-bck differ
+ [1]
+
+
+`hg update` does affect the key-file (when needed)
+--------------------------------------------------
+
+update changing the tracked set
+
+(removing)
+
+ $ cp .hg/dirstate-tracked-hint ../key-bck
+ $ hg status --rev . --rev '.#generations[-1]'
+ R nf10
+ $ hg up '.#generations[-1]'
+ 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ $ diff --brief .hg/dirstate-tracked-hint ../key-bck
+ Files .hg/dirstate-tracked-hint and ../key-bck differ
+ [1]
+
+(adding)
+
+ $ cp .hg/dirstate-tracked-hint ../key-bck
+ $ hg status --rev . --rev '.#generations[1]'
+ A nf10
+ $ hg up '.#generations[1]'
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ diff --brief .hg/dirstate-tracked-hint ../key-bck
+ Files .hg/dirstate-tracked-hint and ../key-bck differ
+ [1]
+
+update not affecting the tracked set
+
+ $ echo foo >> nf0
+ $ hg commit -m foo
+
+ $ cp .hg/dirstate-tracked-hint ../key-bck
+ $ hg status --rev . --rev '.#generations[-1]'
+ M nf0
+ $ hg up '.#generations[-1]'
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ diff --brief .hg/dirstate-tracked-hint ../key-bck
+
+Test upgrade and downgrade
+==========================
+
+ $ ls .hg/dirstate-tracked-hint
+ .hg/dirstate-tracked-hint
+ $ hg debugrequires | grep 'tracked'
+ dirstate-tracked-key-v1
+
+downgrade
+
+ $ hg debugupgraderepo --config format.use-dirstate-tracked-hint=no --run --quiet
+ upgrade will perform the following actions:
+
+ requirements
+ preserved: * (glob)
+ removed: dirstate-tracked-key-v1
+
+ no revlogs to process
+
+ $ ls -1 .hg/dirstate-tracked-hint
+ ls: cannot access '.hg/dirstate-tracked-hint': $ENOENT$
+ [2]
+ $ hg debugrequires | grep 'tracked'
+ [1]
+
+upgrade
+
+ $ hg debugupgraderepo --config format.use-dirstate-tracked-hint=yes --run --quiet
+ upgrade will perform the following actions:
+
+ requirements
+ preserved: * (glob)
+ added: dirstate-tracked-key-v1
+
+ no revlogs to process
+
+ $ ls -1 .hg/dirstate-tracked-hint
+ .hg/dirstate-tracked-hint
+ $ hg debugrequires | grep 'tracked'
+ dirstate-tracked-key-v1
--- a/tests/test-status.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-status.t Fri Feb 18 14:27:43 2022 +0100
@@ -218,6 +218,13 @@
! deleted
? unknown
+hg status -n:
+ $ env RHG_ON_UNSUPPORTED=abort hg status -n
+ added
+ removed
+ deleted
+ unknown
+
hg status modified added removed deleted unknown never-existed ignored:
$ hg status modified added removed deleted unknown never-existed ignored
@@ -934,6 +941,7 @@
Now the directory is eligible for caching, so its mtime is save in the dirstate
$ rm subdir/unknown
+  $ sleep 0.1 # ensure the kernel's internal clock for mtimes has ticked
$ hg status
$ hg debugdirstate --all --no-dates | grep '^ '
0 -1 set subdir
--- a/tests/test-stream-bundle-v2.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-stream-bundle-v2.t Fri Feb 18 14:27:43 2022 +0100
@@ -45,15 +45,13 @@
$ hg bundle -a --type="none-v2;stream=v2" bundle.hg
$ hg debugbundle bundle.hg
Stream params: {}
- stream2 -- {bytecount: 1693, filecount: 11, requirements: dotencode%2Cfncache%2Cgeneraldelta%2Crevlogv1%2Csparserevlog%2Cstore} (mandatory: True) (no-zstd !)
- stream2 -- {bytecount: 1693, filecount: 11, requirements: dotencode%2Cfncache%2Cgeneraldelta%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog%2Cstore} (mandatory: True) (zstd no-rust !)
- stream2 -- {bytecount: 1693, filecount: 11, requirements: dotencode%2Cfncache%2Cgeneraldelta%2Cpersistent-nodemap%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog%2Cstore} (mandatory: True) (rust no-dirstate-v2 !)
- stream2 -- {bytecount: 1693, filecount: 11, requirements: dotencode%2Cuse-dirstate-v2%2Cfncache%2Cgeneraldelta%2Cpersistent-nodemap%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog%2Cstore} (mandatory: True) (dirstate-v2 !)
+ stream2 -- {bytecount: 1693, filecount: 11, requirements: generaldelta%2Crevlogv1%2Csparserevlog} (mandatory: True) (no-zstd !)
+ stream2 -- {bytecount: 1693, filecount: 11, requirements: generaldelta%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog} (mandatory: True) (zstd no-rust !)
+ stream2 -- {bytecount: 1693, filecount: 11, requirements: generaldelta%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog} (mandatory: True) (rust !)
$ hg debugbundle --spec bundle.hg
- none-v2;stream=v2;requirements%3Ddotencode%2Cfncache%2Cgeneraldelta%2Crevlogv1%2Csparserevlog%2Cstore (no-zstd !)
- none-v2;stream=v2;requirements%3Ddotencode%2Cfncache%2Cgeneraldelta%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog%2Cstore (zstd no-rust !)
- none-v2;stream=v2;requirements%3Ddotencode%2Cfncache%2Cgeneraldelta%2Cpersistent-nodemap%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog%2Cstore (rust no-dirstate-v2 !)
- none-v2;stream=v2;requirements%3Ddotencode%2Cuse-dirstate-v2%2Cfncache%2Cgeneraldelta%2Cpersistent-nodemap%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog%2Cstore (dirstate-v2 !)
+ none-v2;stream=v2;requirements%3Dgeneraldelta%2Crevlogv1%2Csparserevlog (no-zstd !)
+ none-v2;stream=v2;requirements%3Dgeneraldelta%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog (zstd no-rust !)
+ none-v2;stream=v2;requirements%3Dgeneraldelta%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog (rust !)
Test that we can apply the bundle as a stream clone bundle
--- a/tests/test-subrepo-deep-nested-change.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-subrepo-deep-nested-change.t Fri Feb 18 14:27:43 2022 +0100
@@ -192,7 +192,7 @@
3 files updated, 0 files merged, 0 files removed, 0 files unresolved
Largefiles is NOT enabled in the clone if the source repo doesn't require it
- $ grep largefiles cloned/.hg/hgrc
+ $ hg debugrequires -R cloned | grep largefiles
[1]
Checking cloned repo ids
@@ -776,7 +776,7 @@
extensions.largefiles=
$ hg --config extensions.largefiles= clone -qU . ../lfclone
- $ grep largefiles ../lfclone/.hg/requires
+ $ hg debugrequires -R ../lfclone | grep largefiles
largefiles
Find an exact match to a standin (should archive nothing)
--- a/tests/test-subrepo.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-subrepo.t Fri Feb 18 14:27:43 2022 +0100
@@ -278,7 +278,7 @@
branchmerge: True, force: False, partial: False
ancestor: 1f14a2e2d3ec, local: f0d2028bf86d+, remote: 1831e14459c4
starting 4 threads for background file closing (?)
- .hgsubstate: versions differ -> m (premerge)
+ .hgsubstate: versions differ -> m
subrepo merge f0d2028bf86d+ 1831e14459c4 1f14a2e2d3ec
subrepo t: other changed, get t:6747d179aa9a688023c4b0cad32e4c92bb7f34ad:hg
getting subrepo t
@@ -304,7 +304,7 @@
branchmerge: True, force: False, partial: False
ancestor: 1831e14459c4, local: e45c8b14af55+, remote: f94576341bcf
starting 4 threads for background file closing (?)
- .hgsubstate: versions differ -> m (premerge)
+ .hgsubstate: versions differ -> m
subrepo merge e45c8b14af55+ f94576341bcf 1831e14459c4
subrepo t: both sides changed
subrepository t diverged (local revision: 20a0db6fbf6c, remote revision: 7af322bc1198)
@@ -317,13 +317,10 @@
ancestor: 6747d179aa9a, local: 20a0db6fbf6c+, remote: 7af322bc1198
starting 4 threads for background file closing (?)
preserving t for resolve of t
- t: versions differ -> m (premerge)
+ t: versions differ -> m
picked tool ':merge' for t (binary False symlink False changedelete False)
merging t
my t@20a0db6fbf6c+ other t@7af322bc1198 ancestor t@6747d179aa9a
- t: versions differ -> m (merge)
- picked tool ':merge' for t (binary False symlink False changedelete False)
- my t@20a0db6fbf6c+ other t@7af322bc1198 ancestor t@6747d179aa9a
warning: conflicts while merging t! (edit, then use 'hg resolve --mark')
0 files updated, 0 files merged, 0 files removed, 1 files unresolved
use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
@@ -1021,37 +1018,21 @@
test if untracked file is not overwritten
-(this also tests that updated .hgsubstate is treated as "modified",
-when 'merge.update()' is aborted before 'merge.recordupdates()', even
-if none of mode, size and timestamp of it isn't changed on the
-filesystem (see also issue4583))
+(this test also has a change to update .hgsubstate and merge it within the
+same second. The file should be marked as modified, even if none of its mode,
+size and timestamp changed on the filesystem (see also issue4583))
$ echo issue3276_ok > repo/s/b
$ hg -R repo2 push -f -q
- $ touch -t 200001010000 repo/.hgsubstate
- $ cat >> repo/.hg/hgrc <<EOF
- > [fakedirstatewritetime]
- > # emulate invoking dirstate.write() via repo.status()
- > # at 2000-01-01 00:00
- > fakenow = 200001010000
- >
- > [extensions]
- > fakedirstatewritetime = $TESTDIR/fakedirstatewritetime.py
- > EOF
$ hg -R repo update
b: untracked file differs
abort: untracked files in working directory differ from files in requested revision (in subrepository "s")
[255]
- $ cat >> repo/.hg/hgrc <<EOF
- > [extensions]
- > fakedirstatewritetime = !
- > EOF
$ cat repo/s/b
issue3276_ok
$ rm repo/s/b
- $ touch -t 200001010000 repo/.hgsubstate
$ hg -R repo revert --all
reverting repo/.hgsubstate
reverting subrepo s
--- a/tests/test-tags.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-tags.t Fri Feb 18 14:27:43 2022 +0100
@@ -146,12 +146,12 @@
$ hg identify
b9154636be93 tip
$ hg blackbox -l 6
- 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> identify
- 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> writing 48 bytes to cache/hgtagsfnodes1
- 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> 0/2 cache hits/lookups in * seconds (glob)
- 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> writing .hg/cache/tags2-visible with 1 tags
- 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> identify exited 0 after * seconds (glob)
- 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> blackbox -l 6
+ 1970-01-01 00:00:00.000 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> identify
+ 1970-01-01 00:00:00.000 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> writing 48 bytes to cache/hgtagsfnodes1
+ 1970-01-01 00:00:00.000 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> 0/2 cache hits/lookups in * seconds (glob)
+ 1970-01-01 00:00:00.000 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> writing .hg/cache/tags2-visible with 1 tags
+ 1970-01-01 00:00:00.000 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> identify exited 0 after * seconds (glob)
+ 1970-01-01 00:00:00.000 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> blackbox -l 6
Failure to acquire lock results in no write
@@ -160,12 +160,12 @@
$ hg identify
b9154636be93 tip
$ hg blackbox -l 6
- 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> identify
- 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> not writing .hg/cache/hgtagsfnodes1 because lock cannot be acquired
- 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> 0/2 cache hits/lookups in * seconds (glob)
- 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> writing .hg/cache/tags2-visible with 1 tags
- 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> identify exited 0 after * seconds (glob)
- 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> blackbox -l 6
+ 1970-01-01 00:00:00.000 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> identify
+ 1970-01-01 00:00:00.000 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> not writing .hg/cache/hgtagsfnodes1 because lock cannot be acquired
+ 1970-01-01 00:00:00.000 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> 0/2 cache hits/lookups in * seconds (glob)
+ 1970-01-01 00:00:00.000 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> writing .hg/cache/tags2-visible with 1 tags
+ 1970-01-01 00:00:00.000 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> identify exited 0 after * seconds (glob)
+ 1970-01-01 00:00:00.000 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> blackbox -l 6
$ fnodescacheexists
no fnodes cache
@@ -226,9 +226,9 @@
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)
$ hg blackbox -l3
- 1970/01/01 00:00:00 bob @c8edf04160c7f731e4589d66ab3ab3486a64ac28 (5000)> merge 1
- 1970/01/01 00:00:00 bob @c8edf04160c7f731e4589d66ab3ab3486a64ac28+b9154636be938d3d431e75a7c906504a079bfe07 (5000)> merge 1 exited 0 after * seconds (glob)
- 1970/01/01 00:00:00 bob @c8edf04160c7f731e4589d66ab3ab3486a64ac28+b9154636be938d3d431e75a7c906504a079bfe07 (5000)> blackbox -l3
+ 1970-01-01 00:00:00.000 bob @c8edf04160c7f731e4589d66ab3ab3486a64ac28 (5000)> merge 1
+ 1970-01-01 00:00:00.000 bob @c8edf04160c7f731e4589d66ab3ab3486a64ac28+b9154636be938d3d431e75a7c906504a079bfe07 (5000)> merge 1 exited 0 after * seconds (glob)
+ 1970-01-01 00:00:00.000 bob @c8edf04160c7f731e4589d66ab3ab3486a64ac28+b9154636be938d3d431e75a7c906504a079bfe07 (5000)> blackbox -l3
$ hg id
c8edf04160c7+b9154636be93+ tip
$ hg status
@@ -364,12 +364,12 @@
bar 1:78391a272241
$ hg blackbox -l 6
- 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> tags
- 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> writing 24 bytes to cache/hgtagsfnodes1
- 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> 3/4 cache hits/lookups in * seconds (glob)
- 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> writing .hg/cache/tags2-visible with 1 tags
- 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> tags exited 0 after * seconds (glob)
- 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> blackbox -l 6
+ 1970-01-01 00:00:00.000 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> tags
+ 1970-01-01 00:00:00.000 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> writing 24 bytes to cache/hgtagsfnodes1
+ 1970-01-01 00:00:00.000 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> 3/4 cache hits/lookups in * seconds (glob)
+ 1970-01-01 00:00:00.000 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> writing .hg/cache/tags2-visible with 1 tags
+ 1970-01-01 00:00:00.000 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> tags exited 0 after * seconds (glob)
+ 1970-01-01 00:00:00.000 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> blackbox -l 6
On junk data + missing cache entries, hg also overwrites the junk.
@@ -474,12 +474,12 @@
bar 1:78391a272241
$ hg blackbox -l 6
- 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags
- 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> couldn't write cache/hgtagsfnodes1: [Errno *] * (glob)
- 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> 2/4 cache hits/lookups in * seconds (glob)
- 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> writing .hg/cache/tags2-visible with 1 tags
- 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags exited 0 after * seconds (glob)
- 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> blackbox -l 6
+ 1970-01-01 00:00:00.000 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags
+ 1970-01-01 00:00:00.000 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> couldn't write cache/hgtagsfnodes1: [Errno *] * (glob)
+ 1970-01-01 00:00:00.000 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> 2/4 cache hits/lookups in * seconds (glob)
+ 1970-01-01 00:00:00.000 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> writing .hg/cache/tags2-visible with 1 tags
+ 1970-01-01 00:00:00.000 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags exited 0 after * seconds (glob)
+ 1970-01-01 00:00:00.000 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> blackbox -l 6
$ chmod a+w .hg/cache/hgtagsfnodes1
@@ -489,12 +489,12 @@
bar 1:78391a272241
$ hg blackbox -l 6
- 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags
- 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> writing 24 bytes to cache/hgtagsfnodes1
- 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> 2/4 cache hits/lookups in * seconds (glob)
- 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> writing .hg/cache/tags2-visible with 1 tags
- 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags exited 0 after * seconds (glob)
- 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> blackbox -l 6
+ 1970-01-01 00:00:00.000 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags
+ 1970-01-01 00:00:00.000 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> writing 24 bytes to cache/hgtagsfnodes1
+ 1970-01-01 00:00:00.000 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> 2/4 cache hits/lookups in * seconds (glob)
+ 1970-01-01 00:00:00.000 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> writing .hg/cache/tags2-visible with 1 tags
+ 1970-01-01 00:00:00.000 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags exited 0 after * seconds (glob)
+ 1970-01-01 00:00:00.000 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> blackbox -l 6
$ f --size .hg/cache/hgtagsfnodes1
.hg/cache/hgtagsfnodes1: size=168
@@ -518,11 +518,11 @@
bar 1:78391a272241
$ hg blackbox -l 5
- 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> writing 24 bytes to cache/hgtagsfnodes1
- 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> 2/4 cache hits/lookups in * seconds (glob)
- 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> writing .hg/cache/tags2-visible with 1 tags
- 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> tags exited 0 after * seconds (glob)
- 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> blackbox -l 5
+ 1970-01-01 00:00:00.000 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> writing 24 bytes to cache/hgtagsfnodes1
+ 1970-01-01 00:00:00.000 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> 2/4 cache hits/lookups in * seconds (glob)
+ 1970-01-01 00:00:00.000 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> writing .hg/cache/tags2-visible with 1 tags
+ 1970-01-01 00:00:00.000 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> tags exited 0 after * seconds (glob)
+ 1970-01-01 00:00:00.000 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> blackbox -l 5
$ f --size .hg/cache/hgtagsfnodes1
.hg/cache/hgtagsfnodes1: size=120
@@ -535,12 +535,12 @@
bar 1:78391a272241
$ hg blackbox -l 6
- 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> tags
- 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> writing 24 bytes to cache/hgtagsfnodes1
- 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> 3/4 cache hits/lookups in * seconds (glob)
- 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> writing .hg/cache/tags2-visible with 1 tags
- 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> tags exited 0 after * seconds (glob)
- 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> blackbox -l 6
+ 1970-01-01 00:00:00.000 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> tags
+ 1970-01-01 00:00:00.000 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> writing 24 bytes to cache/hgtagsfnodes1
+ 1970-01-01 00:00:00.000 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> 3/4 cache hits/lookups in * seconds (glob)
+ 1970-01-01 00:00:00.000 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> writing .hg/cache/tags2-visible with 1 tags
+ 1970-01-01 00:00:00.000 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> tags exited 0 after * seconds (glob)
+ 1970-01-01 00:00:00.000 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> blackbox -l 6
$ f --size .hg/cache/hgtagsfnodes1
.hg/cache/hgtagsfnodes1: size=144
--- a/tests/test-template-functions.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-template-functions.t Fri Feb 18 14:27:43 2022 +0100
@@ -1295,10 +1295,10 @@
-1
$ hg log -T '{revset("%d", rev + 1)}\n' -r'tip'
abort: unknown revision '3'
- [255]
+ [10]
$ hg log -T '{revset("%d", rev - 1)}\n' -r'null'
abort: unknown revision '-2'
- [255]
+ [10]
Invalid arguments passed to revset()
--- a/tests/test-transplant.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-transplant.t Fri Feb 18 14:27:43 2022 +0100
@@ -1063,7 +1063,7 @@
$ cat r1
Y1
$ hg debugstate | grep ' r1$'
- n 644 3 unset r1
+ n 0 -1 unset r1
$ hg status -A r1
M r1
--- a/tests/test-treemanifest.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-treemanifest.t Fri Feb 18 14:27:43 2022 +0100
@@ -5,7 +5,7 @@
Requirements get set on init
- $ grep treemanifest .hg/requires
+ $ hg debugrequires | grep treemanifest
treemanifest
Without directories, looks like any other repo
@@ -229,7 +229,7 @@
$ cd repo-mixed
$ test -d .hg/store/meta
[1]
- $ grep treemanifest .hg/requires
+ $ hg debugrequires | grep treemanifest
treemanifest
Should be possible to push updates from flat to tree manifest repo
@@ -373,7 +373,7 @@
> [experimental]
> changegroup3=yes
> EOF
- $ grep treemanifest empty-repo/.hg/requires
+ $ hg debugrequires -R empty-repo | grep treemanifest
[1]
$ hg push -R repo -r 0 empty-repo
pushing to empty-repo
@@ -382,13 +382,13 @@
adding manifests
adding file changes
added 1 changesets with 2 changes to 2 files
- $ grep treemanifest empty-repo/.hg/requires
+ $ hg debugrequires -R empty-repo | grep treemanifest
treemanifest
Pushing to an empty repo works
$ hg --config experimental.treemanifest=1 init clone
- $ grep treemanifest clone/.hg/requires
+ $ hg debugrequires -R clone | grep treemanifest
treemanifest
$ hg push -R repo clone
pushing to clone
@@ -397,7 +397,7 @@
adding manifests
adding file changes
added 11 changesets with 15 changes to 10 files (+3 heads)
- $ grep treemanifest clone/.hg/requires
+ $ hg debugrequires -R clone | grep treemanifest
treemanifest
$ hg -R clone verify
checking changesets
@@ -682,7 +682,7 @@
No server errors.
$ cat deeprepo/errors.log
requires got updated to include treemanifest
- $ cat deepclone/.hg/requires | grep treemanifest
+ $ hg debugrequires -R deepclone | grep treemanifest
treemanifest
Tree manifest revlogs exist.
$ find deepclone/.hg/store/meta | sort
@@ -730,7 +730,7 @@
updating to branch default
8 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ cd deeprepo-basicstore
- $ grep store .hg/requires
+ $ hg debugrequires | grep store
[1]
$ hg serve -p $HGPORT1 -d --pid-file=hg.pid --errorlog=errors.log
$ cat hg.pid >> $DAEMON_PIDS
@@ -747,7 +747,7 @@
updating to branch default
8 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ cd deeprepo-encodedstore
- $ grep fncache .hg/requires
+ $ hg debugrequires | grep fncache
[1]
$ hg serve -p $HGPORT2 -d --pid-file=hg.pid --errorlog=errors.log
$ cat hg.pid >> $DAEMON_PIDS
@@ -829,11 +829,9 @@
$ hg -R deeprepo debugcreatestreamclonebundle repo-packed.hg
writing 5330 bytes for 18 files (no-zstd !)
writing 5400 bytes for 18 files (zstd !)
- bundle requirements: generaldelta, revlogv1, sparserevlog, treemanifest (no-rust !)
- bundle requirements: generaldelta, persistent-nodemap, revlogv1, sparserevlog, treemanifest (rust !)
+ bundle requirements:.* treemanifest(,.*)? (re)
$ hg debugbundle --spec repo-packed.hg
- none-packed1;requirements%3Dgeneraldelta%2Crevlogv1%2Csparserevlog%2Ctreemanifest (no-rust !)
- none-packed1;requirements%3Dgeneraldelta%2Cpersistent-nodemap%2Crevlogv1%2Csparserevlog%2Ctreemanifest (rust !)
+ none-packed1;requirements%3D(.*%2C)?treemanifest(%2C.*)? (re)
#endif
--- a/tests/test-up-local-change.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-up-local-change.t Fri Feb 18 14:27:43 2022 +0100
@@ -46,13 +46,10 @@
b: remote created -> g
getting b
preserving a for resolve of a
- a: versions differ -> m (premerge)
+ a: versions differ -> m
picked tool 'true' for a (binary False symlink False changedelete False)
merging a
my a@c19d34741b0a+ other a@1e71731e6fbb ancestor a@c19d34741b0a
- a: versions differ -> m (merge)
- picked tool 'true' for a (binary False symlink False changedelete False)
- my a@c19d34741b0a+ other a@1e71731e6fbb ancestor a@c19d34741b0a
launching merge tool: true *$TESTTMP/r2/a* * * (glob)
merge tool returned: 0
1 files updated, 1 files merged, 0 files removed, 0 files unresolved
@@ -72,13 +69,10 @@
removing b
starting 4 threads for background file closing (?)
preserving a for resolve of a
- a: versions differ -> m (premerge)
+ a: versions differ -> m
picked tool 'true' for a (binary False symlink False changedelete False)
merging a
my a@1e71731e6fbb+ other a@c19d34741b0a ancestor a@1e71731e6fbb
- a: versions differ -> m (merge)
- picked tool 'true' for a (binary False symlink False changedelete False)
- my a@1e71731e6fbb+ other a@c19d34741b0a ancestor a@1e71731e6fbb
launching merge tool: true *$TESTTMP/r2/a* * * (glob)
merge tool returned: 0
0 files updated, 1 files merged, 1 files removed, 0 files unresolved
@@ -95,13 +89,10 @@
b: remote created -> g
getting b
preserving a for resolve of a
- a: versions differ -> m (premerge)
+ a: versions differ -> m
picked tool 'true' for a (binary False symlink False changedelete False)
merging a
my a@c19d34741b0a+ other a@1e71731e6fbb ancestor a@c19d34741b0a
- a: versions differ -> m (merge)
- picked tool 'true' for a (binary False symlink False changedelete False)
- my a@c19d34741b0a+ other a@1e71731e6fbb ancestor a@c19d34741b0a
launching merge tool: true *$TESTTMP/r2/a* * * (glob)
merge tool returned: 0
1 files updated, 1 files merged, 0 files removed, 0 files unresolved
--- a/tests/test-update-branches.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-update-branches.t Fri Feb 18 14:27:43 2022 +0100
@@ -158,47 +158,47 @@
parent=3
M sub/suba
- $ revtest '-C dirty linear' dirty 1 2 -C
+ $ revtest '--clean dirty linear' dirty 1 2 --clean
2 files updated, 0 files merged, 0 files removed, 0 files unresolved
parent=2
- $ revtest '-c dirty linear' dirty 1 2 -c
+ $ revtest '--check dirty linear' dirty 1 2 --check
abort: uncommitted changes
parent=1
M foo
- $ revtest '-m dirty linear' dirty 1 2 -m
+ $ revtest '--merge dirty linear' dirty 1 2 --merge
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
parent=2
M foo
- $ revtest '-m dirty cross' dirty 3 4 -m
+ $ revtest '--merge dirty cross' dirty 3 4 --merge
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
parent=4
M foo
- $ revtest '-c dirtysub linear' dirtysub 1 2 -c
+ $ revtest '--check dirtysub linear' dirtysub 1 2 --check
abort: uncommitted changes in subrepository "sub"
parent=1
M sub/suba
- $ norevtest '-c clean same' clean 2 -c
+  $ norevtest '--check clean same' clean 2 --check
0 files updated, 0 files merged, 0 files removed, 0 files unresolved
updated to "bd10386d478c: 2"
1 other heads for branch "default"
parent=2
- $ revtest '-cC dirty linear' dirty 1 2 -cC
+ $ revtest '--check --clean dirty linear' dirty 1 2 "--check --clean"
abort: cannot specify both --clean and --check
parent=1
M foo
- $ revtest '-mc dirty linear' dirty 1 2 -mc
+  $ revtest '--merge --check dirty linear' dirty 1 2 "--merge --check"
abort: cannot specify both --check and --merge
parent=1
M foo
- $ revtest '-mC dirty linear' dirty 1 2 -mC
+  $ revtest '--merge --clean dirty linear' dirty 1 2 "--merge --clean"
abort: cannot specify both --clean and --merge
parent=1
M foo
@@ -211,12 +211,27 @@
parent=1
M foo
- $ revtest 'none dirty linear' dirty 1 2 -c
+ $ revtest 'none dirty linear' dirty 1 2 --check
+ abort: uncommitted changes
+ parent=1
+ M foo
+
+  $ revtest '--check none dirty linear' dirty 1 2 --check
abort: uncommitted changes
parent=1
M foo
- $ revtest 'none dirty linear' dirty 1 2 -C
+ $ revtest '--merge none dirty linear' dirty 1 2 --merge
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ parent=2
+ M foo
+
+ $ revtest '--merge none dirty linear' dirty 1 2 --no-check
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ parent=2
+ M foo
+
+ $ revtest 'none dirty linear' dirty 1 2 --clean
2 files updated, 0 files merged, 0 files removed, 0 files unresolved
parent=2
@@ -232,12 +247,17 @@
parent=2
M foo
- $ revtest 'none dirty linear' dirty 1 2 -c
+ $ revtest 'none dirty linear' dirty 1 2 --check
abort: uncommitted changes
parent=1
M foo
- $ revtest 'none dirty linear' dirty 1 2 -C
+ $ revtest 'none dirty linear' dirty 1 2 --no-merge
+ abort: uncommitted changes
+ parent=1
+ M foo
+
+ $ revtest 'none dirty linear' dirty 1 2 --clean
2 files updated, 0 files merged, 0 files removed, 0 files unresolved
parent=2
@@ -272,14 +292,14 @@
# To mark files as resolved: hg resolve --mark FILE
$ cat a
- <<<<<<< working copy: 6efa171f091b - test: 3
+ <<<<<<< working copy: 6efa171f091b - test: 3
three
dirty
- ||||||| base
+ ||||||| working copy parent: 6efa171f091b - test: 3
three
=======
four
- >>>>>>> destination: d047485b3896 b1 - test: 4
+ >>>>>>> destination: d047485b3896 b1 - test: 4
$ rm a.orig
$ echo 'update.check = noconflict' >> .hg/hgrc
@@ -676,9 +696,8 @@
(commit or update --clean to discard changes)
[255]
-Test that we don't crash when updating from a pruned changeset (i.e. has no
-successors). Behavior should probably be that we update to the first
-non-obsolete parent but that will be decided later.
+Test that we update to the closest non-obsolete ancestor when updating from a
+pruned changeset (i.e. that has no successors)
$ hg id --debug -r 2
bd10386d478cd5a9faf2e604114c8e6da62d3889
$ hg up --quiet 0
@@ -686,21 +705,18 @@
$ hg debugobsolete bd10386d478cd5a9faf2e604114c8e6da62d3889
1 new obsolescence markers
obsoleted 1 changesets
+ $ hg log -r '_destupdate()'
+ 1:0786582aa4b1 1 (no-eol)
$ hg up
- 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
-
-Test experimental revset support
-
- $ hg log -r '_destupdate()'
- 2:bd10386d478c 2 (no-eol)
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
Test that boolean flags allow --no-flag specification to override [defaults]
$ cat >> $HGRCPATH <<EOF
> [defaults]
> update = --check
> EOF
- $ hg co 2
+ $ hg co 1
abort: uncommitted changes
[20]
- $ hg co --no-check 2
+ $ hg co --no-check 1
0 files updated, 0 files merged, 0 files removed, 0 files unresolved
--- a/tests/test-upgrade-repo.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-upgrade-repo.t Fri Feb 18 14:27:43 2022 +0100
@@ -6,6 +6,8 @@
> [format]
> # stabilize test accross variant
> revlog-compression=zlib
+ > [storage]
+ > dirstate-v2.slow-path=allow
> EOF
store and revlogv1 are required in source
@@ -30,12 +32,166 @@
Cannot upgrade shared repositories
$ hg init share-parent
+ $ hg -R share-parent debugbuilddag -n .+9
+ $ hg -R share-parent up tip
+ 10 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg -q share share-parent share-child
- $ hg -R share-child debugupgraderepo
- abort: cannot upgrade repository; unsupported source requirement: shared
+ $ hg -R share-child debugupgraderepo --config format.sparse-revlog=no
+ abort: cannot use these actions on a share repository: sparserevlog
+ (upgrade the main repository directly)
[255]
+Unless the action is compatible with share
+
+ $ hg -R share-child debugupgraderepo --config format.use-dirstate-v2=yes --quiet
+ requirements
+ preserved: * (glob)
+ added: dirstate-v2
+
+ no revlogs to process
+
+
+ $ hg -R share-child debugupgraderepo --config format.use-dirstate-v2=yes --quiet --run
+ upgrade will perform the following actions:
+
+ requirements
+ preserved: * (glob)
+ added: dirstate-v2
+
+ no revlogs to process
+
+ $ hg debugformat -R share-child | grep dirstate-v2
+ dirstate-v2: yes
+ $ hg debugformat -R share-parent | grep dirstate-v2
+ dirstate-v2: no
+ $ hg status --all -R share-child
+ C nf0
+ C nf1
+ C nf2
+ C nf3
+ C nf4
+ C nf5
+ C nf6
+ C nf7
+ C nf8
+ C nf9
+ $ hg log -l 3 -R share-child
+ changeset: 9:0059eb38e4a4
+ tag: tip
+ user: debugbuilddag
+ date: Thu Jan 01 00:00:09 1970 +0000
+ summary: r9
+
+ changeset: 8:4d5be70c8130
+ user: debugbuilddag
+ date: Thu Jan 01 00:00:08 1970 +0000
+ summary: r8
+
+ changeset: 7:e60bfe72517e
+ user: debugbuilddag
+ date: Thu Jan 01 00:00:07 1970 +0000
+ summary: r7
+
+ $ hg status --all -R share-parent
+ C nf0
+ C nf1
+ C nf2
+ C nf3
+ C nf4
+ C nf5
+ C nf6
+ C nf7
+ C nf8
+ C nf9
+ $ hg log -l 3 -R share-parent
+ changeset: 9:0059eb38e4a4
+ tag: tip
+ user: debugbuilddag
+ date: Thu Jan 01 00:00:09 1970 +0000
+ summary: r9
+
+ changeset: 8:4d5be70c8130
+ user: debugbuilddag
+ date: Thu Jan 01 00:00:08 1970 +0000
+ summary: r8
+
+ changeset: 7:e60bfe72517e
+ user: debugbuilddag
+ date: Thu Jan 01 00:00:07 1970 +0000
+ summary: r7
+
+
+ $ hg -R share-child debugupgraderepo --config format.use-dirstate-v2=no --quiet --run
+ upgrade will perform the following actions:
+
+ requirements
+ preserved: * (glob)
+ removed: dirstate-v2
+
+ no revlogs to process
+
+ $ hg debugformat -R share-child | grep dirstate-v2
+ dirstate-v2: no
+ $ hg debugformat -R share-parent | grep dirstate-v2
+ dirstate-v2: no
+ $ hg status --all -R share-child
+ C nf0
+ C nf1
+ C nf2
+ C nf3
+ C nf4
+ C nf5
+ C nf6
+ C nf7
+ C nf8
+ C nf9
+ $ hg log -l 3 -R share-child
+ changeset: 9:0059eb38e4a4
+ tag: tip
+ user: debugbuilddag
+ date: Thu Jan 01 00:00:09 1970 +0000
+ summary: r9
+
+ changeset: 8:4d5be70c8130
+ user: debugbuilddag
+ date: Thu Jan 01 00:00:08 1970 +0000
+ summary: r8
+
+ changeset: 7:e60bfe72517e
+ user: debugbuilddag
+ date: Thu Jan 01 00:00:07 1970 +0000
+ summary: r7
+
+ $ hg status --all -R share-parent
+ C nf0
+ C nf1
+ C nf2
+ C nf3
+ C nf4
+ C nf5
+ C nf6
+ C nf7
+ C nf8
+ C nf9
+ $ hg log -l 3 -R share-parent
+ changeset: 9:0059eb38e4a4
+ tag: tip
+ user: debugbuilddag
+ date: Thu Jan 01 00:00:09 1970 +0000
+ summary: r9
+
+ changeset: 8:4d5be70c8130
+ user: debugbuilddag
+ date: Thu Jan 01 00:00:08 1970 +0000
+ summary: r8
+
+ changeset: 7:e60bfe72517e
+ user: debugbuilddag
+ date: Thu Jan 01 00:00:07 1970 +0000
+ summary: r7
+
+
Do not yet support upgrading treemanifest repos
$ hg --config experimental.treemanifest=true init treemanifest
@@ -58,9 +214,10 @@
format-variant repo
fncache: yes
dirstate-v2: no
+ tracked-hint: no
dotencode: yes
generaldelta: yes
- share-safe: no
+ share-safe: yes
sparserevlog: yes
persistent-nodemap: no (no-rust !)
persistent-nodemap: yes (rust !)
@@ -74,9 +231,10 @@
format-variant repo config default
fncache: yes yes yes
dirstate-v2: no no no
+ tracked-hint: no no no
dotencode: yes yes yes
generaldelta: yes yes yes
- share-safe: no no no
+ share-safe: yes yes yes
sparserevlog: yes yes yes
persistent-nodemap: no no no (no-rust !)
persistent-nodemap: yes yes no (rust !)
@@ -91,9 +249,10 @@
format-variant repo config default
fncache: yes no yes
dirstate-v2: no no no
+ tracked-hint: no no no
dotencode: yes no yes
generaldelta: yes yes yes
- share-safe: no no no
+ share-safe: yes yes yes
sparserevlog: yes yes yes
persistent-nodemap: no no no (no-rust !)
persistent-nodemap: yes yes no (rust !)
@@ -108,9 +267,10 @@
format-variant repo config default
[formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes]
[formatvariant.name.uptodate|dirstate-v2: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
+ [formatvariant.name.uptodate|tracked-hint: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
[formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes]
[formatvariant.name.uptodate|generaldelta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
- [formatvariant.name.uptodate|share-safe: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
+ [formatvariant.name.uptodate|share-safe: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
[formatvariant.name.uptodate|sparserevlog: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
[formatvariant.name.uptodate|persistent-nodemap:][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no] (no-rust !)
[formatvariant.name.mismatchdefault|persistent-nodemap:][formatvariant.repo.mismatchdefault| yes][formatvariant.config.special| yes][formatvariant.default| no] (rust !)
@@ -136,6 +296,12 @@
"repo": false
},
{
+ "config": false,
+ "default": false,
+ "name": "tracked-hint",
+ "repo": false
+ },
+ {
"config": true,
"default": true,
"name": "dotencode",
@@ -148,10 +314,10 @@
"repo": true
},
{
- "config": false,
- "default": false,
+ "config": true,
+ "default": true,
"name": "share-safe",
- "repo": false
+ "repo": true
},
{
"config": true,
@@ -210,13 +376,10 @@
performing an upgrade with "--run" will make the following changes:
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-rust !)
- preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, sparserevlog, store (rust !)
+ preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
+ preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
- processed revlogs:
- - all-filelogs
- - changelog
- - manifest
+ no revlogs to process
additional optimizations are available by specifying "--optimize <name>":
@@ -235,13 +398,10 @@
$ hg debugupgraderepo --quiet
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-rust !)
- preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, sparserevlog, store (rust !)
+ preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
+ preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
- processed revlogs:
- - all-filelogs
- - changelog
- - manifest
+ no revlogs to process
--optimize can be used to add optimizations
@@ -251,8 +411,8 @@
performing an upgrade with "--run" will make the following changes:
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-rust !)
- preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, sparserevlog, store (rust !)
+ preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
+ preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
optimisations: re-delta-parent
@@ -283,8 +443,8 @@
performing an upgrade with "--run" will make the following changes:
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-rust !)
- preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, sparserevlog, store (rust !)
+ preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
+ preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
optimisations: re-delta-parent
@@ -309,8 +469,8 @@
$ hg debugupgrade --optimize re-delta-parent --quiet
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-rust !)
- preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, sparserevlog, store (rust !)
+ preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
+ preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
optimisations: re-delta-parent
@@ -338,6 +498,7 @@
format-variant repo
fncache: no
dirstate-v2: no
+ tracked-hint: no
dotencode: no
generaldelta: no
share-safe: no
@@ -353,9 +514,10 @@
format-variant repo config default
fncache: no yes yes
dirstate-v2: no no no
+ tracked-hint: no no no
dotencode: no yes yes
generaldelta: no yes yes
- share-safe: no no no
+ share-safe: no yes yes
sparserevlog: no yes yes
persistent-nodemap: no no no (no-rust !)
persistent-nodemap: no yes no (rust !)
@@ -370,9 +532,10 @@
format-variant repo config default
fncache: no yes yes
dirstate-v2: no no no
+ tracked-hint: no no no
dotencode: no yes yes
generaldelta: no no yes
- share-safe: no no no
+ share-safe: no yes yes
sparserevlog: no no yes
persistent-nodemap: no no no (no-rust !)
persistent-nodemap: no yes no (rust !)
@@ -387,9 +550,10 @@
format-variant repo config default
[formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
[formatvariant.name.uptodate|dirstate-v2: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
+ [formatvariant.name.uptodate|tracked-hint: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
[formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
[formatvariant.name.mismatchdefault|generaldelta: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
- [formatvariant.name.uptodate|share-safe: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
+ [formatvariant.name.mismatchconfig|share-safe: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
[formatvariant.name.mismatchdefault|sparserevlog: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
[formatvariant.name.uptodate|persistent-nodemap:][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no] (no-rust !)
[formatvariant.name.mismatchconfig|persistent-nodemap:][formatvariant.repo.mismatchconfig| no][formatvariant.config.special| yes][formatvariant.default| no] (rust !)
@@ -401,6 +565,10 @@
[formatvariant.name.mismatchdefault|compression: ][formatvariant.repo.mismatchdefault| zlib][formatvariant.config.special| zlib][formatvariant.default| zstd] (zstd !)
[formatvariant.name.uptodate|compression-level: ][formatvariant.repo.uptodate| default][formatvariant.config.default| default][formatvariant.default| default]
$ hg debugupgraderepo
+ note: selecting all-filelogs for processing to change: dotencode
+ note: selecting all-manifestlogs for processing to change: dotencode
+ note: selecting changelog for processing to change: dotencode
+
repository lacks features recommended by current config options:
fncache
@@ -412,6 +580,9 @@
generaldelta
deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
+ share-safe
+ old shared repositories do not share source repository requirements and config. This leads to various problems when the source repository format is upgraded or some new extensions are enabled.
+
sparserevlog
in order to limit disk reading and memory usage on older version, the span of a delta chain from its root to its end is limited, whatever the relevant data in this span. This can severly limit Mercurial ability to build good chain of delta resulting is much more storage space being taken and limit reusability of on disk delta during exchange.
@@ -423,8 +594,8 @@
requirements
preserved: revlogv1, store
- added: dotencode, fncache, generaldelta, sparserevlog (no-rust !)
- added: dotencode, fncache, generaldelta, persistent-nodemap, sparserevlog (rust !)
+ added: dotencode, fncache, generaldelta, share-safe, sparserevlog (no-rust !)
+ added: dotencode, fncache, generaldelta, persistent-nodemap, share-safe, sparserevlog (rust !)
fncache
repository will be more resilient to storing certain paths and performance of certain operations should be improved
@@ -435,6 +606,9 @@
generaldelta
repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
+ share-safe
+ Upgrades a repository to share-safe format so that future shares of this repository share its requirements and configs.
+
sparserevlog
Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
@@ -463,8 +637,8 @@
$ hg debugupgraderepo --quiet
requirements
preserved: revlogv1, store
- added: dotencode, fncache, generaldelta, sparserevlog (no-rust !)
- added: dotencode, fncache, generaldelta, persistent-nodemap, sparserevlog (rust !)
+ added: dotencode, fncache, generaldelta, share-safe, sparserevlog (no-rust !)
+ added: dotencode, fncache, generaldelta, persistent-nodemap, share-safe, sparserevlog (rust !)
processed revlogs:
- all-filelogs
@@ -473,6 +647,10 @@
$ hg --config format.dotencode=false debugupgraderepo
+ note: selecting all-filelogs for processing to change: fncache
+ note: selecting all-manifestlogs for processing to change: fncache
+ note: selecting changelog for processing to change: fncache
+
repository lacks features recommended by current config options:
fncache
@@ -481,6 +659,9 @@
generaldelta
deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
+ share-safe
+ old shared repositories do not share source repository requirements and config. This leads to various problems when the source repository format is upgraded or some new extensions are enabled.
+
sparserevlog
in order to limit disk reading and memory usage on older version, the span of a delta chain from its root to its end is limited, whatever the relevant data in this span. This can severly limit Mercurial ability to build good chain of delta resulting is much more storage space being taken and limit reusability of on disk delta during exchange.
@@ -497,8 +678,8 @@
requirements
preserved: revlogv1, store
- added: fncache, generaldelta, sparserevlog (no-rust !)
- added: fncache, generaldelta, persistent-nodemap, sparserevlog (rust !)
+ added: fncache, generaldelta, share-safe, sparserevlog (no-rust !)
+ added: fncache, generaldelta, persistent-nodemap, share-safe, sparserevlog (rust !)
fncache
repository will be more resilient to storing certain paths and performance of certain operations should be improved
@@ -506,6 +687,9 @@
generaldelta
repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
+ share-safe
+ Upgrades a repository to share-safe format so that future shares of this repository share its requirements and configs.
+
sparserevlog
Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
@@ -567,11 +751,15 @@
.hg/store/data/f2.i
$ hg debugupgraderepo --run --config format.sparse-revlog=false
+ note: selecting all-filelogs for processing to change: generaldelta
+ note: selecting all-manifestlogs for processing to change: generaldelta
+ note: selecting changelog for processing to change: generaldelta
+
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, revlogv1, store (no-rust !)
- preserved: dotencode, fncache, persistent-nodemap, revlogv1, store (rust !)
+ preserved: dotencode, fncache, revlogv1, share-safe, store (no-rust !)
+ preserved: dotencode, fncache, persistent-nodemap, revlogv1, share-safe, store (rust !)
added: generaldelta
generaldelta
@@ -596,6 +784,7 @@
finished migrating 3 changelog revisions; change in size: 0 bytes
finished migrating 9 total revisions; total change in store size: -17 bytes
copying phaseroots
+ copying requires
data fully upgraded in a temporary repository
marking source repository as being upgraded; clients will be unable to read from repository
starting in-place swap of repository data
@@ -610,20 +799,24 @@
Original requirements backed up
$ cat .hg/upgradebackup.*/requires
+ share-safe
+ $ cat .hg/upgradebackup.*/store/requires
dotencode
fncache
persistent-nodemap (rust !)
revlogv1
store
+ upgradeinprogress
generaldelta added to original requirements files
- $ cat .hg/requires
+ $ hg debugrequires
dotencode
fncache
generaldelta
persistent-nodemap (rust !)
revlogv1
+ share-safe
store
store directory has files we expect
@@ -634,6 +827,7 @@
data
fncache
phaseroots
+ requires
undo
undo.backupfiles
undo.phaseroots
@@ -662,6 +856,7 @@
data
fncache
phaseroots
+ requires
undo
undo.backup.fncache
undo.backupfiles
@@ -671,11 +866,15 @@
$ rm -rf .hg/upgradebackup.*/
$ hg debugupgraderepo --run --no-backup
+ note: selecting all-filelogs for processing to change: sparserevlog
+ note: selecting all-manifestlogs for processing to change: sparserevlog
+ note: selecting changelog for processing to change: sparserevlog
+
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, store (no-rust !)
- preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, store (rust !)
+ preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, store (no-rust !)
+ preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, store (rust !)
added: sparserevlog
sparserevlog
@@ -700,6 +899,7 @@
finished migrating 3 changelog revisions; change in size: 0 bytes
finished migrating 9 total revisions; total change in store size: 0 bytes
copying phaseroots
+ copying requires
data fully upgraded in a temporary repository
marking source repository as being upgraded; clients will be unable to read from repository
starting in-place swap of repository data
@@ -716,8 +916,8 @@
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-rust !)
- preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, sparserevlog, store (rust !)
+ preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
+ preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
optimisations: re-delta-parent
@@ -746,6 +946,7 @@
finished migrating 3 changelog revisions; change in size: 0 bytes
finished migrating 9 total revisions; total change in store size: 0 bytes
copying phaseroots
+ copying requires
data fully upgraded in a temporary repository
marking source repository as being upgraded; clients will be unable to read from repository
starting in-place swap of repository data
@@ -794,8 +995,8 @@
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-rust !)
- preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, sparserevlog, store (rust !)
+ preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
+ preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
optimisations: re-delta-parent
@@ -825,6 +1026,7 @@
finished migrating 3 changelog revisions; change in size: 0 bytes
finished migrating 9 total revisions; total change in store size: 0 bytes
copying phaseroots
+ copying requires
data fully upgraded in a temporary repository
marking source repository as being upgraded; clients will be unable to read from repository
starting in-place swap of repository data
@@ -845,8 +1047,8 @@
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-rust !)
- preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, sparserevlog, store (rust !)
+ preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
+ preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
optimisations: re-delta-parent
@@ -875,6 +1077,7 @@
finished migrating 3 changelog revisions; change in size: 0 bytes
finished migrating 9 total revisions; total change in store size: 0 bytes
copying phaseroots
+ copying requires
data fully upgraded in a temporary repository
marking source repository as being upgraded; clients will be unable to read from repository
starting in-place swap of repository data
@@ -895,8 +1098,8 @@
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-rust !)
- preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, sparserevlog, store (rust !)
+ preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
+ preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
optimisations: re-delta-parent
@@ -925,6 +1128,7 @@
finished migrating 3 changelog revisions; change in size: 0 bytes
finished migrating 9 total revisions; total change in store size: 0 bytes
copying phaseroots
+ copying requires
data fully upgraded in a temporary repository
marking source repository as being upgraded; clients will be unable to read from repository
starting in-place swap of repository data
@@ -944,13 +1148,30 @@
$ echo "[format]" > .hg/hgrc
$ echo "sparse-revlog=no" >> .hg/hgrc
+ $ hg debugupgrade --optimize re-delta-parent --no-manifest --no-backup --quiet
+ warning: ignoring --no-manifest, as upgrade is changing: sparserevlog
+
+ requirements
+ preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, store (no-rust !)
+ preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, store (rust !)
+ removed: sparserevlog
+
+ optimisations: re-delta-parent
+
+ processed revlogs:
+ - all-filelogs
+ - changelog
+ - manifest
+
$ hg debugupgrade --optimize re-delta-parent --run --manifest --no-backup --debug --traceback
- ignoring revlogs selection flags, format requirements change: sparserevlog
+ note: selecting all-filelogs for processing to change: sparserevlog
+ note: selecting changelog for processing to change: sparserevlog
+
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, store (no-rust !)
- preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, store (rust !)
+ preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, store (no-rust !)
+ preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, store (rust !)
removed: sparserevlog
optimisations: re-delta-parent
@@ -982,6 +1203,7 @@
finished migrating 3 changelog revisions; change in size: 0 bytes
finished migrating 9 total revisions; total change in store size: 0 bytes
copying phaseroots
+ copying requires
data fully upgraded in a temporary repository
marking source repository as being upgraded; clients will be unable to read from repository
starting in-place swap of repository data
@@ -1000,12 +1222,14 @@
$ echo "sparse-revlog=yes" >> .hg/hgrc
$ hg debugupgrade --optimize re-delta-parent --run --manifest --no-backup --debug --traceback
- ignoring revlogs selection flags, format requirements change: sparserevlog
+ note: selecting all-filelogs for processing to change: sparserevlog
+ note: selecting changelog for processing to change: sparserevlog
+
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, store (no-rust !)
- preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, store (rust !)
+ preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, store (no-rust !)
+ preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, store (rust !)
added: sparserevlog
optimisations: re-delta-parent
@@ -1040,6 +1264,7 @@
finished migrating 3 changelog revisions; change in size: 0 bytes
finished migrating 9 total revisions; total change in store size: 0 bytes
copying phaseroots
+ copying requires
data fully upgraded in a temporary repository
marking source repository as being upgraded; clients will be unable to read from repository
starting in-place swap of repository data
@@ -1070,8 +1295,8 @@
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-rust !)
- preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, sparserevlog, store (rust !)
+ preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
+ preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
optimisations: re-delta-fulladd
@@ -1098,6 +1323,7 @@
finished migrating 3 total revisions; total change in store size: 0 bytes
copying .XX_special_filename
copying phaseroots
+ copying requires
data fully upgraded in a temporary repository
marking source repository as being upgraded; clients will be unable to read from repository
starting in-place swap of repository data
@@ -1129,25 +1355,27 @@
$ touch foo
$ hg add --large foo
$ hg -q commit -m initial
- $ cat .hg/requires
+ $ hg debugrequires
dotencode
fncache
generaldelta
largefiles
persistent-nodemap (rust !)
revlogv1
+ share-safe
sparserevlog
store
$ hg debugupgraderepo --run
nothing to do
- $ cat .hg/requires
+ $ hg debugrequires
dotencode
fncache
generaldelta
largefiles
persistent-nodemap (rust !)
revlogv1
+ share-safe
sparserevlog
store
@@ -1160,7 +1388,7 @@
$ echo '123456789012345' > lfs.bin
$ hg ci -Am 'lfs.bin'
adding lfs.bin
- $ grep lfs .hg/requires
+ $ hg debugrequires | grep lfs
lfs
$ find .hg/store/lfs -type f
.hg/store/lfs/objects/d0/beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
@@ -1168,7 +1396,7 @@
$ hg debugupgraderepo --run
nothing to do
- $ grep lfs .hg/requires
+ $ hg debugrequires | grep lfs
lfs
$ find .hg/store/lfs -type f
.hg/store/lfs/objects/d0/beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
@@ -1236,8 +1464,8 @@
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-rust !)
- preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, sparserevlog, store (rust !)
+ preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
+ preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
optimisations: re-delta-all
@@ -1263,6 +1491,7 @@
finished migrating 3 changelog revisions; change in size: 0 bytes
finished migrating 9 total revisions; total change in store size: -9 bytes
copying phaseroots
+ copying requires
data fully upgraded in a temporary repository
marking source repository as being upgraded; clients will be unable to read from repository
starting in-place swap of repository data
@@ -1293,12 +1522,13 @@
$ touch foo
$ hg add foo
$ hg -q commit -m "foo"
- $ cat .hg/requires
+ $ hg debugrequires
dotencode
fncache
generaldelta
persistent-nodemap (rust !)
revlogv1
+ share-safe
store
Check that we can add the sparse-revlog format requirement
@@ -1306,8 +1536,8 @@
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, store (no-rust !)
- preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, store (rust !)
+ preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, store (no-rust !)
+ preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, store (rust !)
added: sparserevlog
processed revlogs:
@@ -1315,12 +1545,13 @@
- changelog
- manifest
- $ cat .hg/requires
+ $ hg debugrequires
dotencode
fncache
generaldelta
persistent-nodemap (rust !)
revlogv1
+ share-safe
sparserevlog
store
@@ -1329,8 +1560,8 @@
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, store (no-rust !)
- preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, store (rust !)
+ preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, store (no-rust !)
+ preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, store (rust !)
removed: sparserevlog
processed revlogs:
@@ -1338,12 +1569,13 @@
- changelog
- manifest
- $ cat .hg/requires
+ $ hg debugrequires
dotencode
fncache
generaldelta
persistent-nodemap (rust !)
revlogv1
+ share-safe
store
#if zstd
@@ -1357,8 +1589,8 @@
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, store (no-rust !)
- preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, store (rust !)
+ preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, store (no-rust !)
+ preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, store (rust !)
added: revlog-compression-zstd, sparserevlog
processed revlogs:
@@ -1370,9 +1602,10 @@
format-variant repo config default
fncache: yes yes yes
dirstate-v2: no no no
+ tracked-hint: no no no
dotencode: yes yes yes
generaldelta: yes yes yes
- share-safe: no no no
+ share-safe: yes yes yes
sparserevlog: yes yes yes
persistent-nodemap: no no no (no-rust !)
persistent-nodemap: yes yes no (rust !)
@@ -1383,13 +1616,14 @@
compression: zlib zlib zlib (no-zstd !)
compression: zstd zlib zstd (zstd !)
compression-level: default default default
- $ cat .hg/requires
+ $ hg debugrequires
dotencode
fncache
generaldelta
persistent-nodemap (rust !)
revlog-compression-zstd
revlogv1
+ share-safe
sparserevlog
store
@@ -1399,8 +1633,8 @@
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-rust !)
- preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, sparserevlog, store (rust !)
+ preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
+ preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
removed: revlog-compression-zstd
processed revlogs:
@@ -1412,9 +1646,10 @@
format-variant repo config default
fncache: yes yes yes
dirstate-v2: no no no
+ tracked-hint: no no no
dotencode: yes yes yes
generaldelta: yes yes yes
- share-safe: no no no
+ share-safe: yes yes yes
sparserevlog: yes yes yes
persistent-nodemap: no no no (no-rust !)
persistent-nodemap: yes yes no (rust !)
@@ -1425,12 +1660,13 @@
compression: zlib zlib zlib (no-zstd !)
compression: zlib zlib zstd (zstd !)
compression-level: default default default
- $ cat .hg/requires
+ $ hg debugrequires
dotencode
fncache
generaldelta
persistent-nodemap (rust !)
revlogv1
+ share-safe
sparserevlog
store
@@ -1444,8 +1680,8 @@
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-rust !)
- preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, sparserevlog, store (rust !)
+ preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-rust !)
+ preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (rust !)
added: revlog-compression-zstd
processed revlogs:
@@ -1457,9 +1693,10 @@
format-variant repo config default
fncache: yes yes yes
dirstate-v2: no no no
+ tracked-hint: no no no
dotencode: yes yes yes
generaldelta: yes yes yes
- share-safe: no no no
+ share-safe: yes yes yes
sparserevlog: yes yes yes
persistent-nodemap: no no no (no-rust !)
persistent-nodemap: yes yes no (rust !)
@@ -1470,13 +1707,14 @@
compression: zlib zlib zlib (no-zstd !)
compression: zstd zstd zstd (zstd !)
compression-level: default default default
- $ cat .hg/requires
+ $ hg debugrequires
dotencode
fncache
generaldelta
persistent-nodemap (rust !)
revlog-compression-zstd
revlogv1
+ share-safe
sparserevlog
store
@@ -1492,9 +1730,9 @@
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, generaldelta, store (no-zstd !)
- preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, sparserevlog, store (zstd no-rust !)
- preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, sparserevlog, store (rust !)
+ preserved: dotencode, fncache, generaldelta, share-safe, store (no-zstd !)
+ preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, share-safe, sparserevlog, store (zstd no-rust !)
+ preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, share-safe, sparserevlog, store (rust !)
removed: revlogv1
added: exp-revlogv2.2 (zstd !)
added: exp-revlogv2.2, sparserevlog (no-zstd !)
@@ -1508,9 +1746,10 @@
format-variant repo config default
fncache: yes yes yes
dirstate-v2: no no no
+ tracked-hint: no no no
dotencode: yes yes yes
generaldelta: yes yes yes
- share-safe: no no no
+ share-safe: yes yes yes
sparserevlog: yes yes yes
persistent-nodemap: no no no (no-rust !)
persistent-nodemap: yes yes no (rust !)
@@ -1521,13 +1760,14 @@
compression: zlib zlib zlib (no-zstd !)
compression: zstd zstd zstd (zstd !)
compression-level: default default default
- $ cat .hg/requires
+ $ hg debugrequires
dotencode
exp-revlogv2.2
fncache
generaldelta
persistent-nodemap (rust !)
revlog-compression-zstd (zstd !)
+ share-safe
sparserevlog
store
$ hg debugsidedata -c 0
@@ -1541,9 +1781,9 @@
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, generaldelta, sparserevlog, store (no-zstd !)
- preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, sparserevlog, store (zstd no-rust !)
- preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, sparserevlog, store (rust !)
+ preserved: dotencode, fncache, generaldelta, share-safe, sparserevlog, store (no-zstd !)
+ preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, share-safe, sparserevlog, store (zstd no-rust !)
+ preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, share-safe, sparserevlog, store (rust !)
removed: exp-revlogv2.2
added: revlogv1
@@ -1556,9 +1796,10 @@
format-variant repo config default
fncache: yes yes yes
dirstate-v2: no no no
+ tracked-hint: no no no
dotencode: yes yes yes
generaldelta: yes yes yes
- share-safe: no no no
+ share-safe: yes yes yes
sparserevlog: yes yes yes
persistent-nodemap: no no no (no-rust !)
persistent-nodemap: yes yes no (rust !)
@@ -1569,13 +1810,14 @@
compression: zlib zlib zlib (no-zstd !)
compression: zstd zstd zstd (zstd !)
compression-level: default default default
- $ cat .hg/requires
+ $ hg debugrequires
dotencode
fncache
generaldelta
persistent-nodemap (rust !)
revlog-compression-zstd (zstd !)
revlogv1
+ share-safe
sparserevlog
store
$ hg debugsidedata -c 0
@@ -1590,9 +1832,9 @@
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, generaldelta, sparserevlog, store (no-zstd !)
- preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, sparserevlog, store (zstd no-rust !)
- preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, sparserevlog, store (rust !)
+ preserved: dotencode, fncache, generaldelta, share-safe, sparserevlog, store (no-zstd !)
+ preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, share-safe, sparserevlog, store (zstd no-rust !)
+ preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, share-safe, sparserevlog, store (rust !)
removed: revlogv1
added: exp-revlogv2.2
@@ -1605,9 +1847,10 @@
format-variant repo config default
fncache: yes yes yes
dirstate-v2: no no no
+ tracked-hint: no no no
dotencode: yes yes yes
generaldelta: yes yes yes
- share-safe: no no no
+ share-safe: yes yes yes
sparserevlog: yes yes yes
persistent-nodemap: no no no (no-rust !)
persistent-nodemap: yes yes no (rust !)
@@ -1618,13 +1861,14 @@
compression: zlib zlib zlib (no-zstd !)
compression: zstd zstd zstd (zstd !)
compression-level: default default default
- $ cat .hg/requires
+ $ hg debugrequires
dotencode
exp-revlogv2.2
fncache
generaldelta
persistent-nodemap (rust !)
revlog-compression-zstd (zstd !)
+ share-safe
sparserevlog
store
$ hg debugsidedata -c 0
@@ -1657,10 +1901,7 @@
dirstate-v2
"hg status" will be faster
- processed revlogs:
- - all-filelogs
- - changelog
- - manifest
+ no revlogs to process
beginning upgrade...
repository locked and read-only
@@ -1686,10 +1927,7 @@
preserved: * (glob)
removed: dirstate-v2
- processed revlogs:
- - all-filelogs
- - changelog
- - manifest
+ no revlogs to process
beginning upgrade...
repository locked and read-only
@@ -1724,10 +1962,7 @@
dirstate-v2
"hg status" will be faster
- processed revlogs:
- - all-filelogs
- - changelog
- - manifest
+ no revlogs to process
beginning upgrade...
repository locked and read-only
@@ -1748,10 +1983,7 @@
preserved: * (glob)
removed: dirstate-v2
- processed revlogs:
- - all-filelogs
- - changelog
- - manifest
+ no revlogs to process
beginning upgrade...
repository locked and read-only
--- a/tests/test-walk.t Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/test-walk.t Fri Feb 18 14:27:43 2022 +0100
@@ -299,10 +299,10 @@
f mammals/skunk skunk
$ hg debugwalk -v .hg
abort: path 'mammals/.hg' is inside nested repo 'mammals'
- [255]
+ [10]
$ hg debugwalk -v ../.hg
abort: path contains illegal component: .hg
- [255]
+ [10]
$ cd ..
$ hg debugwalk -v -Ibeans
@@ -410,16 +410,16 @@
[255]
$ hg debugwalk -v .hg
abort: path contains illegal component: .hg
- [255]
+ [10]
$ hg debugwalk -v beans/../.hg
abort: path contains illegal component: .hg
- [255]
+ [10]
$ hg debugwalk -v beans/../.hg/data
abort: path contains illegal component: .hg/data
- [255]
+ [10]
$ hg debugwalk -v beans/.hg
abort: path 'beans/.hg' is inside nested repo 'beans'
- [255]
+ [10]
Test explicit paths and excludes:
--- a/tests/test-wireproto-caching.t Fri Feb 18 12:55:39 2022 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,468 +0,0 @@
- $ . $TESTDIR/wireprotohelpers.sh
-
-persistent-nodemap is not enabled by default. It is not relevant for this test so disable it.
-
- $ cat >> $HGRCPATH << EOF
- > [format]
- > use-persistent-nodemap = no
- > [extensions]
- > blackbox =
- > [blackbox]
- > track = simplecache
- > EOF
- $ hg init server
- $ enablehttpv2 server
- $ cd server
- $ cat >> .hg/hgrc << EOF
- > [extensions]
- > simplecache = $TESTDIR/wireprotosimplecache.py
- > EOF
-
- $ echo a0 > a
- $ echo b0 > b
- $ hg -q commit -A -m 'commit 0'
- $ echo a1 > a
- $ hg commit -m 'commit 1'
- $ echo b1 > b
- $ hg commit -m 'commit 2'
- $ echo a2 > a
- $ echo b2 > b
- $ hg commit -m 'commit 3'
-
- $ hg log -G -T '{rev}:{node} {desc}'
- @ 3:50590a86f3ff5d1e9a1624a7a6957884565cc8e8 commit 3
- |
- o 2:4d01eda50c6ac5f7e89cbe1880143a32f559c302 commit 2
- |
- o 1:4432d83626e8a98655f062ec1f2a43b07f7fbbb0 commit 1
- |
- o 0:3390ef850073fbc2f0dfff2244342c8e9229013a commit 0
-
-
- $ hg --debug debugindex -m
- rev linkrev nodeid p1 p2
- 0 0 992f4779029a3df8d0666d00bb924f69634e2641 0000000000000000000000000000000000000000 0000000000000000000000000000000000000000
- 1 1 a988fb43583e871d1ed5750ee074c6d840bbbfc8 992f4779029a3df8d0666d00bb924f69634e2641 0000000000000000000000000000000000000000
- 2 2 a8853dafacfca6fc807055a660d8b835141a3bb4 a988fb43583e871d1ed5750ee074c6d840bbbfc8 0000000000000000000000000000000000000000
- 3 3 3fe11dfbb13645782b0addafbe75a87c210ffddc a8853dafacfca6fc807055a660d8b835141a3bb4 0000000000000000000000000000000000000000
-
- $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log
- $ cat hg.pid > $DAEMON_PIDS
-
-Performing the same request should result in same result, with 2nd response
-coming from cache.
-
- $ sendhttpv2peer << EOF
- > command manifestdata
- > nodes eval:[b'\x99\x2f\x47\x79\x02\x9a\x3d\xf8\xd0\x66\x6d\x00\xbb\x92\x4f\x69\x63\x4e\x26\x41']
- > tree eval:b''
- > fields eval:[b'parents']
- > EOF
- creating http peer for wire protocol version 2
- sending manifestdata command
- response: gen[
- {
- b'totalitems': 1
- },
- {
- b'node': b'\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A',
- b'parents': [
- b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
- b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
- ]
- }
- ]
-
- $ sendhttpv2peer << EOF
- > command manifestdata
- > nodes eval:[b'\x99\x2f\x47\x79\x02\x9a\x3d\xf8\xd0\x66\x6d\x00\xbb\x92\x4f\x69\x63\x4e\x26\x41']
- > tree eval:b''
- > fields eval:[b'parents']
- > EOF
- creating http peer for wire protocol version 2
- sending manifestdata command
- response: gen[
- {
- b'totalitems': 1
- },
- {
- b'node': b'\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A',
- b'parents': [
- b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
- b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
- ]
- }
- ]
-
-Sending different request doesn't yield cache hit.
-
- $ sendhttpv2peer << EOF
- > command manifestdata
- > nodes eval:[b'\x99\x2f\x47\x79\x02\x9a\x3d\xf8\xd0\x66\x6d\x00\xbb\x92\x4f\x69\x63\x4e\x26\x41', b'\xa9\x88\xfb\x43\x58\x3e\x87\x1d\x1e\xd5\x75\x0e\xe0\x74\xc6\xd8\x40\xbb\xbf\xc8']
- > tree eval:b''
- > fields eval:[b'parents']
- > EOF
- creating http peer for wire protocol version 2
- sending manifestdata command
- response: gen[
- {
- b'totalitems': 2
- },
- {
- b'node': b'\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A',
- b'parents': [
- b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
- b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
- ]
- },
- {
- b'node': b'\xa9\x88\xfbCX>\x87\x1d\x1e\xd5u\x0e\xe0t\xc6\xd8@\xbb\xbf\xc8',
- b'parents': [
- b'\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A',
- b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
- ]
- }
- ]
-
- $ cat .hg/blackbox.log
- *> cacher constructed for manifestdata (glob)
- *> cache miss for 47abb8efa5f01b8964d74917793ad2464db0fa2c (glob)
- *> storing cache entry for 47abb8efa5f01b8964d74917793ad2464db0fa2c (glob)
- *> cacher constructed for manifestdata (glob)
- *> cache hit for 47abb8efa5f01b8964d74917793ad2464db0fa2c (glob)
- *> cacher constructed for manifestdata (glob)
- *> cache miss for 37326a83e9843f15161fce9d1e92d06b795d5e8e (glob)
- *> storing cache entry for 37326a83e9843f15161fce9d1e92d06b795d5e8e (glob)
-
- $ cat error.log
-
- $ killdaemons.py
- $ rm .hg/blackbox.log
-
-Try with object caching mode
-
- $ cat >> .hg/hgrc << EOF
- > [simplecache]
- > cacheobjects = true
- > EOF
-
- $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log
- $ cat hg.pid > $DAEMON_PIDS
-
- $ sendhttpv2peer << EOF
- > command manifestdata
- > nodes eval:[b'\x99\x2f\x47\x79\x02\x9a\x3d\xf8\xd0\x66\x6d\x00\xbb\x92\x4f\x69\x63\x4e\x26\x41']
- > tree eval:b''
- > fields eval:[b'parents']
- > EOF
- creating http peer for wire protocol version 2
- sending manifestdata command
- response: gen[
- {
- b'totalitems': 1
- },
- {
- b'node': b'\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A',
- b'parents': [
- b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
- b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
- ]
- }
- ]
-
- $ sendhttpv2peer << EOF
- > command manifestdata
- > nodes eval:[b'\x99\x2f\x47\x79\x02\x9a\x3d\xf8\xd0\x66\x6d\x00\xbb\x92\x4f\x69\x63\x4e\x26\x41']
- > tree eval:b''
- > fields eval:[b'parents']
- > EOF
- creating http peer for wire protocol version 2
- sending manifestdata command
- response: gen[
- {
- b'totalitems': 1
- },
- {
- b'node': b'\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A',
- b'parents': [
- b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
- b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
- ]
- }
- ]
-
- $ cat .hg/blackbox.log
- *> cacher constructed for manifestdata (glob)
- *> cache miss for 47abb8efa5f01b8964d74917793ad2464db0fa2c (glob)
- *> storing cache entry for 47abb8efa5f01b8964d74917793ad2464db0fa2c (glob)
- *> cacher constructed for manifestdata (glob)
- *> cache hit for 47abb8efa5f01b8964d74917793ad2464db0fa2c (glob)
-
- $ cat error.log
-
- $ killdaemons.py
- $ rm .hg/blackbox.log
-
-A non-cacheable command does not instantiate cacher
-
- $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log
- $ cat hg.pid > $DAEMON_PIDS
- $ sendhttpv2peer << EOF
- > command capabilities
- > EOF
- creating http peer for wire protocol version 2
- sending capabilities command
- response: gen[
- {
- b'commands': {
- b'branchmap': {
- b'args': {},
- b'permissions': [
- b'pull'
- ]
- },
- b'capabilities': {
- b'args': {},
- b'permissions': [
- b'pull'
- ]
- },
- b'changesetdata': {
- b'args': {
- b'fields': {
- b'default': set([]),
- b'required': False,
- b'type': b'set',
- b'validvalues': set([
- b'bookmarks',
- b'parents',
- b'phase',
- b'revision'
- ])
- },
- b'revisions': {
- b'required': True,
- b'type': b'list'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'filedata': {
- b'args': {
- b'fields': {
- b'default': set([]),
- b'required': False,
- b'type': b'set',
- b'validvalues': set([
- b'linknode',
- b'parents',
- b'revision'
- ])
- },
- b'haveparents': {
- b'default': False,
- b'required': False,
- b'type': b'bool'
- },
- b'nodes': {
- b'required': True,
- b'type': b'list'
- },
- b'path': {
- b'required': True,
- b'type': b'bytes'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'filesdata': {
- b'args': {
- b'fields': {
- b'default': set([]),
- b'required': False,
- b'type': b'set',
- b'validvalues': set([
- b'firstchangeset',
- b'linknode',
- b'parents',
- b'revision'
- ])
- },
- b'haveparents': {
- b'default': False,
- b'required': False,
- b'type': b'bool'
- },
- b'pathfilter': {
- b'default': None,
- b'required': False,
- b'type': b'dict'
- },
- b'revisions': {
- b'required': True,
- b'type': b'list'
- }
- },
- b'permissions': [
- b'pull'
- ],
- b'recommendedbatchsize': 50000
- },
- b'heads': {
- b'args': {
- b'publiconly': {
- b'default': False,
- b'required': False,
- b'type': b'bool'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'known': {
- b'args': {
- b'nodes': {
- b'default': [],
- b'required': False,
- b'type': b'list'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'listkeys': {
- b'args': {
- b'namespace': {
- b'required': True,
- b'type': b'bytes'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'lookup': {
- b'args': {
- b'key': {
- b'required': True,
- b'type': b'bytes'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'manifestdata': {
- b'args': {
- b'fields': {
- b'default': set([]),
- b'required': False,
- b'type': b'set',
- b'validvalues': set([
- b'parents',
- b'revision'
- ])
- },
- b'haveparents': {
- b'default': False,
- b'required': False,
- b'type': b'bool'
- },
- b'nodes': {
- b'required': True,
- b'type': b'list'
- },
- b'tree': {
- b'required': True,
- b'type': b'bytes'
- }
- },
- b'permissions': [
- b'pull'
- ],
- b'recommendedbatchsize': 100000
- },
- b'pushkey': {
- b'args': {
- b'key': {
- b'required': True,
- b'type': b'bytes'
- },
- b'namespace': {
- b'required': True,
- b'type': b'bytes'
- },
- b'new': {
- b'required': True,
- b'type': b'bytes'
- },
- b'old': {
- b'required': True,
- b'type': b'bytes'
- }
- },
- b'permissions': [
- b'push'
- ]
- },
- b'rawstorefiledata': {
- b'args': {
- b'files': {
- b'required': True,
- b'type': b'list'
- },
- b'pathfilter': {
- b'default': None,
- b'required': False,
- b'type': b'list'
- }
- },
- b'permissions': [
- b'pull'
- ]
- }
- },
- b'framingmediatypes': [
- b'application/mercurial-exp-framing-0006'
- ],
- b'pathfilterprefixes': set([
- b'path:',
- b'rootfilesin:'
- ]),
- b'rawrepoformats': [
- b'generaldelta',
- b'revlogv1',
- b'sparserevlog'
- ]
- }
- ]
-
- $ test -f .hg/blackbox.log
- [1]
-
-An error is not cached
-
- $ sendhttpv2peer << EOF
- > command manifestdata
- > nodes eval:[b'\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa']
- > tree eval:b''
- > fields eval:[b'parents']
- > EOF
- creating http peer for wire protocol version 2
- sending manifestdata command
- abort: unknown node: \xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa (esc)
- [255]
-
- $ cat .hg/blackbox.log
- *> cacher constructed for manifestdata (glob)
- *> cache miss for 2cba2a7d0d1575fea2fe68f597e97a7c2ac2f705 (glob)
- *> cacher exiting due to error (glob)
-
- $ killdaemons.py
- $ rm .hg/blackbox.log
--- a/tests/test-wireproto-command-branchmap.t Fri Feb 18 12:55:39 2022 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,59 +0,0 @@
- $ . $TESTDIR/wireprotohelpers.sh
-
- $ hg init server
- $ enablehttpv2 server
- $ cd server
- $ hg debugdrawdag << EOF
- > C D
- > |/
- > B
- > |
- > A
- > EOF
-
- $ hg up B
- 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
- $ hg branch branch1
- marked working directory as branch branch1
- (branches are permanent and global, did you want a bookmark?)
- $ echo b1 > foo
- $ hg -q commit -A -m 'branch 1'
- $ hg up B
- 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
- $ hg branch branch2
- marked working directory as branch branch2
- $ echo b2 > foo
- $ hg -q commit -A -m 'branch 2'
-
- $ hg log -T '{rev}:{node} {branch} {desc}\n'
- 5:224161c7589aa48fa83a48feff5e95b56ae327fc branch2 branch 2
- 4:b5faacdfd2633768cb3152336cc0953381266688 branch1 branch 1
- 3:be0ef73c17ade3fc89dc41701eb9fc3a91b58282 default D
- 2:26805aba1e600a82e93661149f2313866a221a7b default C
- 1:112478962961147124edd43549aedd1a335e44bf default B
- 0:426bada5c67598ca65036d57d9e4b64b0c1ce7a0 default A
-
- $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log
- $ cat hg.pid > $DAEMON_PIDS
-
-No arguments returns something reasonable
-
- $ sendhttpv2peer << EOF
- > command branchmap
- > EOF
- creating http peer for wire protocol version 2
- sending branchmap command
- response: {
- b'branch1': [
- b'\xb5\xfa\xac\xdf\xd2c7h\xcb1R3l\xc0\x953\x81&f\x88'
- ],
- b'branch2': [
- b'"Aa\xc7X\x9a\xa4\x8f\xa8:H\xfe\xff^\x95\xb5j\xe3\'\xfc'
- ],
- b'default': [
- b'&\x80Z\xba\x1e`\n\x82\xe96a\x14\x9f#\x13\x86j"\x1a{',
- b'\xbe\x0e\xf7<\x17\xad\xe3\xfc\x89\xdcAp\x1e\xb9\xfc:\x91\xb5\x82\x82'
- ]
- }
-
- $ cat error.log
--- a/tests/test-wireproto-command-capabilities.t Fri Feb 18 12:55:39 2022 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,762 +0,0 @@
-#require no-chg
-
- $ . $TESTDIR/wireprotohelpers.sh
-
-persistent-nodemap is not enabled by default. It is not relevant for this test so disable it.
-
- $ cat >> $HGRCPATH << EOF
- > [format]
- > use-persistent-nodemap = no
- > EOF
-
- $ hg init server
-
-zstd isn't present in plain builds. Make tests easier by removing
-zstd from the equation.
-
- $ cat >> server/.hg/hgrc << EOF
- > [server]
- > compressionengines = zlib
- > EOF
-
- $ hg -R server serve -p $HGPORT -d --pid-file hg.pid -E error.log
- $ cat hg.pid > $DAEMON_PIDS
-
-A normal capabilities request is serviced for version 1
-
- $ sendhttpraw << EOF
- > httprequest GET ?cmd=capabilities
- > user-agent: test
- > EOF
- using raw connection to peer
- s> setsockopt(6, 1, 1) -> None (?)
- s> GET /?cmd=capabilities HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> user-agent: test\r\n
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> \r\n
- s> makefile('rb', None)
- s> HTTP/1.1 200 Script output follows\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Content-Type: application/mercurial-0.1\r\n
- s> Content-Length: *\r\n (glob)
- s> \r\n
- s> batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
-
-A proper request without the API server enabled returns the legacy response
-
- $ sendhttpraw << EOF
- > httprequest GET ?cmd=capabilities
- > user-agent: test
- > x-hgupgrade-1: foo
- > x-hgproto-1: cbor
- > EOF
- using raw connection to peer
- s> setsockopt(6, 1, 1) -> None (?)
- s> GET /?cmd=capabilities HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> user-agent: test\r\n
- s> x-hgproto-1: cbor\r\n
- s> x-hgupgrade-1: foo\r\n
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> \r\n
- s> makefile('rb', None)
- s> HTTP/1.1 200 Script output follows\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Content-Type: application/mercurial-0.1\r\n
- s> Content-Length: *\r\n (glob)
- s> \r\n
- s> batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
-
-Restart with just API server enabled. This enables serving the new format.
-
- $ killdaemons.py
- $ cat error.log
-
- $ cat >> server/.hg/hgrc << EOF
- > [experimental]
- > web.apiserver = true
- > EOF
-
- $ hg -R server serve -p $HGPORT -d --pid-file hg.pid -E error.log
- $ cat hg.pid > $DAEMON_PIDS
-
-X-HgUpgrade-<N> without CBOR advertisement uses legacy response
-
- $ sendhttpraw << EOF
- > httprequest GET ?cmd=capabilities
- > user-agent: test
- > x-hgupgrade-1: foo bar
- > EOF
- using raw connection to peer
- s> setsockopt(6, 1, 1) -> None (?)
- s> GET /?cmd=capabilities HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> user-agent: test\r\n
- s> x-hgupgrade-1: foo bar\r\n
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> \r\n
- s> makefile('rb', None)
- s> HTTP/1.1 200 Script output follows\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Content-Type: application/mercurial-0.1\r\n
- s> Content-Length: *\r\n (glob)
- s> \r\n
- s> batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
-
-X-HgUpgrade-<N> without known serialization in X-HgProto-<N> uses legacy response
-
- $ sendhttpraw << EOF
- > httprequest GET ?cmd=capabilities
- > user-agent: test
- > x-hgupgrade-1: foo bar
- > x-hgproto-1: some value
- > EOF
- using raw connection to peer
- s> setsockopt(6, 1, 1) -> None (?)
- s> GET /?cmd=capabilities HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> user-agent: test\r\n
- s> x-hgproto-1: some value\r\n
- s> x-hgupgrade-1: foo bar\r\n
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> \r\n
- s> makefile('rb', None)
- s> HTTP/1.1 200 Script output follows\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Content-Type: application/mercurial-0.1\r\n
- s> Content-Length: *\r\n (glob)
- s> \r\n
- s> batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
-
-X-HgUpgrade-<N> + X-HgProto-<N> headers trigger new response format
-
- $ sendhttpraw << EOF
- > httprequest GET ?cmd=capabilities
- > user-agent: test
- > x-hgupgrade-1: foo bar
- > x-hgproto-1: cbor
- > EOF
- using raw connection to peer
- s> setsockopt(6, 1, 1) -> None (?)
- s> GET /?cmd=capabilities HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> user-agent: test\r\n
- s> x-hgproto-1: cbor\r\n
- s> x-hgupgrade-1: foo bar\r\n
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> \r\n
- s> makefile('rb', None)
- s> HTTP/1.1 200 OK\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Content-Type: application/mercurial-cbor\r\n
- s> Content-Length: *\r\n (glob)
- s> \r\n
- s> \xa3GapibaseDapi/Dapis\xa0Nv1capabilitiesY\x01\xe4batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
- cbor> [
- {
- b'apibase': b'api/',
- b'apis': {},
- b'v1capabilities': b'batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash'
- }
- ]
-
-Restart server to enable HTTPv2
-
- $ killdaemons.py
- $ enablehttpv2 server
- $ hg -R server serve -p $HGPORT -d --pid-file hg.pid -E error.log
- $ cat hg.pid > $DAEMON_PIDS
-
-Only requested API services are returned
-
- $ sendhttpraw << EOF
- > httprequest GET ?cmd=capabilities
- > user-agent: test
- > x-hgupgrade-1: foo bar
- > x-hgproto-1: cbor
- > EOF
- using raw connection to peer
- s> setsockopt(6, 1, 1) -> None (?)
- s> GET /?cmd=capabilities HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> user-agent: test\r\n
- s> x-hgproto-1: cbor\r\n
- s> x-hgupgrade-1: foo bar\r\n
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> \r\n
- s> makefile('rb', None)
- s> HTTP/1.1 200 OK\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Content-Type: application/mercurial-cbor\r\n
- s> Content-Length: *\r\n (glob)
- s> \r\n
- s> \xa3GapibaseDapi/Dapis\xa0Nv1capabilitiesY\x01\xe4batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
- cbor> [
- {
- b'apibase': b'api/',
- b'apis': {},
- b'v1capabilities': b'batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash'
- }
- ]
-
-Request for HTTPv2 service returns information about it
-
- $ sendhttpraw << EOF
- > httprequest GET ?cmd=capabilities
- > user-agent: test
- > x-hgupgrade-1: exp-http-v2-0003 foo bar
- > x-hgproto-1: cbor
- > EOF
- using raw connection to peer
- s> setsockopt(6, 1, 1) -> None (?)
- s> GET /?cmd=capabilities HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> user-agent: test\r\n
- s> x-hgproto-1: cbor\r\n
- s> x-hgupgrade-1: exp-http-v2-0003 foo bar\r\n
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> \r\n
- s> makefile('rb', None)
- s> HTTP/1.1 200 OK\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Content-Type: application/mercurial-cbor\r\n
- s> Content-Length: *\r\n (glob)
- s> \r\n
- s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa4Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\
x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogNv1capabilitiesY\x01\xe4batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
- cbor> [
- {
- b'apibase': b'api/',
- b'apis': {
- b'exp-http-v2-0003': {
- b'commands': {
- b'branchmap': {
- b'args': {},
- b'permissions': [
- b'pull'
- ]
- },
- b'capabilities': {
- b'args': {},
- b'permissions': [
- b'pull'
- ]
- },
- b'changesetdata': {
- b'args': {
- b'fields': {
- b'default': set([]),
- b'required': False,
- b'type': b'set',
- b'validvalues': set([
- b'bookmarks',
- b'parents',
- b'phase',
- b'revision'
- ])
- },
- b'revisions': {
- b'required': True,
- b'type': b'list'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'filedata': {
- b'args': {
- b'fields': {
- b'default': set([]),
- b'required': False,
- b'type': b'set',
- b'validvalues': set([
- b'linknode',
- b'parents',
- b'revision'
- ])
- },
- b'haveparents': {
- b'default': False,
- b'required': False,
- b'type': b'bool'
- },
- b'nodes': {
- b'required': True,
- b'type': b'list'
- },
- b'path': {
- b'required': True,
- b'type': b'bytes'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'filesdata': {
- b'args': {
- b'fields': {
- b'default': set([]),
- b'required': False,
- b'type': b'set',
- b'validvalues': set([
- b'firstchangeset',
- b'linknode',
- b'parents',
- b'revision'
- ])
- },
- b'haveparents': {
- b'default': False,
- b'required': False,
- b'type': b'bool'
- },
- b'pathfilter': {
- b'default': None,
- b'required': False,
- b'type': b'dict'
- },
- b'revisions': {
- b'required': True,
- b'type': b'list'
- }
- },
- b'permissions': [
- b'pull'
- ],
- b'recommendedbatchsize': 50000
- },
- b'heads': {
- b'args': {
- b'publiconly': {
- b'default': False,
- b'required': False,
- b'type': b'bool'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'known': {
- b'args': {
- b'nodes': {
- b'default': [],
- b'required': False,
- b'type': b'list'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'listkeys': {
- b'args': {
- b'namespace': {
- b'required': True,
- b'type': b'bytes'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'lookup': {
- b'args': {
- b'key': {
- b'required': True,
- b'type': b'bytes'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'manifestdata': {
- b'args': {
- b'fields': {
- b'default': set([]),
- b'required': False,
- b'type': b'set',
- b'validvalues': set([
- b'parents',
- b'revision'
- ])
- },
- b'haveparents': {
- b'default': False,
- b'required': False,
- b'type': b'bool'
- },
- b'nodes': {
- b'required': True,
- b'type': b'list'
- },
- b'tree': {
- b'required': True,
- b'type': b'bytes'
- }
- },
- b'permissions': [
- b'pull'
- ],
- b'recommendedbatchsize': 100000
- },
- b'pushkey': {
- b'args': {
- b'key': {
- b'required': True,
- b'type': b'bytes'
- },
- b'namespace': {
- b'required': True,
- b'type': b'bytes'
- },
- b'new': {
- b'required': True,
- b'type': b'bytes'
- },
- b'old': {
- b'required': True,
- b'type': b'bytes'
- }
- },
- b'permissions': [
- b'push'
- ]
- },
- b'rawstorefiledata': {
- b'args': {
- b'files': {
- b'required': True,
- b'type': b'list'
- },
- b'pathfilter': {
- b'default': None,
- b'required': False,
- b'type': b'list'
- }
- },
- b'permissions': [
- b'pull'
- ]
- }
- },
- b'framingmediatypes': [
- b'application/mercurial-exp-framing-0006'
- ],
- b'pathfilterprefixes': set([
- b'path:',
- b'rootfilesin:'
- ]),
- b'rawrepoformats': [
- b'generaldelta',
- b'revlogv1',
- b'sparserevlog'
- ]
- }
- },
- b'v1capabilities': b'batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash'
- }
- ]
-
-capabilities command returns expected info
-
- $ sendhttpv2peerhandshake << EOF
- > command capabilities
- > EOF
- creating http peer for wire protocol version 2
- s> setsockopt(6, 1, 1) -> None (?)
- s> GET /?cmd=capabilities HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> vary: X-HgProto-1,X-HgUpgrade-1\r\n
- s> x-hgproto-1: cbor\r\n
- s> x-hgupgrade-1: exp-http-v2-0003\r\n
- s> accept: application/mercurial-0.1\r\n
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> user-agent: Mercurial debugwireproto\r\n
- s> \r\n
- s> makefile('rb', None)
- s> HTTP/1.1 200 OK\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Content-Type: application/mercurial-cbor\r\n
- s> Content-Length: *\r\n (glob)
- s> \r\n
- s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa4Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\
x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogNv1capabilitiesY\x01\xe4batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
- sending capabilities command
- s> setsockopt(6, 1, 1) -> None (?)
- s> POST /api/exp-http-v2-0003/ro/capabilities HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> accept: application/mercurial-exp-framing-0006\r\n
- s> content-type: application/mercurial-exp-framing-0006\r\n
- s> content-length: 63\r\n
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> user-agent: Mercurial debugwireproto\r\n
- s> \r\n
- s> \x1c\x00\x00\x01\x00\x01\x01\x82\xa1Pcontentencodings\x81Hidentity\x13\x00\x00\x01\x00\x01\x00\x11\xa1DnameLcapabilities
- s> makefile('rb', None)
- s> HTTP/1.1 200 OK\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Content-Type: application/mercurial-exp-framing-0006\r\n
- s> Transfer-Encoding: chunked\r\n
- s> \r\n
- s> 11\r\n
- s> \t\x00\x00\x01\x00\x02\x01\x92
- s> Hidentity
- s> \r\n
- s> 13\r\n
- s> \x0b\x00\x00\x01\x00\x02\x041
- s> \xa1FstatusBok
- s> \r\n
- s> 65e\r\n
- s> V\x06\x00\x01\x00\x02\x041
- s> \xa4Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoforma
ts\x83LgeneraldeltaHrevlogv1Lsparserevlog
- s> \r\n
- s> 8\r\n
- s> \x00\x00\x00\x01\x00\x02\x002
- s> \r\n
- s> 0\r\n
- s> \r\n
- response: gen[
- {
- b'commands': {
- b'branchmap': {
- b'args': {},
- b'permissions': [
- b'pull'
- ]
- },
- b'capabilities': {
- b'args': {},
- b'permissions': [
- b'pull'
- ]
- },
- b'changesetdata': {
- b'args': {
- b'fields': {
- b'default': set([]),
- b'required': False,
- b'type': b'set',
- b'validvalues': set([
- b'bookmarks',
- b'parents',
- b'phase',
- b'revision'
- ])
- },
- b'revisions': {
- b'required': True,
- b'type': b'list'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'filedata': {
- b'args': {
- b'fields': {
- b'default': set([]),
- b'required': False,
- b'type': b'set',
- b'validvalues': set([
- b'linknode',
- b'parents',
- b'revision'
- ])
- },
- b'haveparents': {
- b'default': False,
- b'required': False,
- b'type': b'bool'
- },
- b'nodes': {
- b'required': True,
- b'type': b'list'
- },
- b'path': {
- b'required': True,
- b'type': b'bytes'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'filesdata': {
- b'args': {
- b'fields': {
- b'default': set([]),
- b'required': False,
- b'type': b'set',
- b'validvalues': set([
- b'firstchangeset',
- b'linknode',
- b'parents',
- b'revision'
- ])
- },
- b'haveparents': {
- b'default': False,
- b'required': False,
- b'type': b'bool'
- },
- b'pathfilter': {
- b'default': None,
- b'required': False,
- b'type': b'dict'
- },
- b'revisions': {
- b'required': True,
- b'type': b'list'
- }
- },
- b'permissions': [
- b'pull'
- ],
- b'recommendedbatchsize': 50000
- },
- b'heads': {
- b'args': {
- b'publiconly': {
- b'default': False,
- b'required': False,
- b'type': b'bool'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'known': {
- b'args': {
- b'nodes': {
- b'default': [],
- b'required': False,
- b'type': b'list'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'listkeys': {
- b'args': {
- b'namespace': {
- b'required': True,
- b'type': b'bytes'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'lookup': {
- b'args': {
- b'key': {
- b'required': True,
- b'type': b'bytes'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'manifestdata': {
- b'args': {
- b'fields': {
- b'default': set([]),
- b'required': False,
- b'type': b'set',
- b'validvalues': set([
- b'parents',
- b'revision'
- ])
- },
- b'haveparents': {
- b'default': False,
- b'required': False,
- b'type': b'bool'
- },
- b'nodes': {
- b'required': True,
- b'type': b'list'
- },
- b'tree': {
- b'required': True,
- b'type': b'bytes'
- }
- },
- b'permissions': [
- b'pull'
- ],
- b'recommendedbatchsize': 100000
- },
- b'pushkey': {
- b'args': {
- b'key': {
- b'required': True,
- b'type': b'bytes'
- },
- b'namespace': {
- b'required': True,
- b'type': b'bytes'
- },
- b'new': {
- b'required': True,
- b'type': b'bytes'
- },
- b'old': {
- b'required': True,
- b'type': b'bytes'
- }
- },
- b'permissions': [
- b'push'
- ]
- },
- b'rawstorefiledata': {
- b'args': {
- b'files': {
- b'required': True,
- b'type': b'list'
- },
- b'pathfilter': {
- b'default': None,
- b'required': False,
- b'type': b'list'
- }
- },
- b'permissions': [
- b'pull'
- ]
- }
- },
- b'framingmediatypes': [
- b'application/mercurial-exp-framing-0006'
- ],
- b'pathfilterprefixes': set([
- b'path:',
- b'rootfilesin:'
- ]),
- b'rawrepoformats': [
- b'generaldelta',
- b'revlogv1',
- b'sparserevlog'
- ]
- }
- ]
- (sent 2 HTTP requests and * bytes; received * bytes in responses) (glob)
-
- $ cat error.log
--- a/tests/test-wireproto-command-changesetdata.t Fri Feb 18 12:55:39 2022 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,613 +0,0 @@
- $ . $TESTDIR/wireprotohelpers.sh
-
- $ hg init server
- $ enablehttpv2 server
- $ cd server
- $ cat >> .hg/hgrc << EOF
- > [phases]
- > publish = false
- > EOF
- $ echo a0 > a
- $ echo b0 > b
-
- $ hg -q commit -A -m 'commit 0'
-
- $ echo a1 > a
- $ echo b1 > b
- $ hg commit -m 'commit 1'
- $ echo b2 > b
- $ hg commit -m 'commit 2'
- $ hg phase --public -r .
-
- $ hg -q up -r 0
- $ echo a2 > a
- $ hg commit -m 'commit 3'
- created new head
-
- $ hg log -G -T '{rev}:{node} {desc}\n'
- @ 3:eae5f82c2e622368d27daecb76b7e393d0f24211 commit 3
- |
- | o 2:0bb8ad894a15b15380b2a2a5b183e20f2a4b28dd commit 2
- | |
- | o 1:7592917e1c3e82677cb0a4bc715ca25dd12d28c1 commit 1
- |/
- o 0:3390ef850073fbc2f0dfff2244342c8e9229013a commit 0
-
-
- $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log
- $ cat hg.pid > $DAEMON_PIDS
-
-No arguments is an invalid request
-
- $ sendhttpv2peer << EOF
- > command changesetdata
- > EOF
- creating http peer for wire protocol version 2
- sending changesetdata command
- abort: missing required arguments: revisions
- [255]
-
-Missing nodes for changesetexplicit results in error
-
- $ sendhttpv2peer << EOF
- > command changesetdata
- > revisions eval:[{b'type': b'changesetexplicit'}]
- > EOF
- creating http peer for wire protocol version 2
- sending changesetdata command
- abort: nodes key not present in changesetexplicit revision specifier
- [255]
-
-changesetexplicitdepth requires nodes and depth keys
-
- $ sendhttpv2peer << EOF
- > command changesetdata
- > revisions eval:[{b'type': b'changesetexplicitdepth'}]
- > EOF
- creating http peer for wire protocol version 2
- sending changesetdata command
- abort: nodes key not present in changesetexplicitdepth revision specifier
- [255]
-
- $ sendhttpv2peer << EOF
- > command changesetdata
- > revisions eval:[{b'type': b'changesetexplicitdepth', b'nodes': []}]
- > EOF
- creating http peer for wire protocol version 2
- sending changesetdata command
- abort: depth key not present in changesetexplicitdepth revision specifier
- [255]
-
- $ sendhttpv2peer << EOF
- > command changesetdata
- > revisions eval:[{b'type': b'changesetexplicitdepth', b'depth': 42}]
- > EOF
- creating http peer for wire protocol version 2
- sending changesetdata command
- abort: nodes key not present in changesetexplicitdepth revision specifier
- [255]
-
-changesetdagrange requires roots and heads keys
-
- $ sendhttpv2peer << EOF
- > command changesetdata
- > revisions eval:[{b'type': b'changesetdagrange'}]
- > EOF
- creating http peer for wire protocol version 2
- sending changesetdata command
- abort: roots key not present in changesetdagrange revision specifier
- [255]
-
- $ sendhttpv2peer << EOF
- > command changesetdata
- > revisions eval:[{b'type': b'changesetdagrange', b'roots': []}]
- > EOF
- creating http peer for wire protocol version 2
- sending changesetdata command
- abort: heads key not present in changesetdagrange revision specifier
- [255]
-
- $ sendhttpv2peer << EOF
- > command changesetdata
- > revisions eval:[{b'type': b'changesetdagrange', b'heads': [b'dummy']}]
- > EOF
- creating http peer for wire protocol version 2
- sending changesetdata command
- abort: roots key not present in changesetdagrange revision specifier
- [255]
-
-Empty changesetdagrange heads results in an error
-
- $ sendhttpv2peer << EOF
- > command changesetdata
- > revisions eval:[{b'type': b'changesetdagrange', b'heads': [], b'roots': []}]
- > EOF
- creating http peer for wire protocol version 2
- sending changesetdata command
- abort: heads key in changesetdagrange cannot be empty
- [255]
-
-Sending just dagrange heads sends all revisions
-
- $ sendhttpv2peer << EOF
- > command changesetdata
- > revisions eval:[{
- > b'type': b'changesetdagrange',
- > b'roots': [],
- > b'heads': [
- > b'\x0b\xb8\xad\x89\x4a\x15\xb1\x53\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f\x2a\x4b\x28\xdd',
- > b'\xea\xe5\xf8\x2c\x2e\x62\x23\x68\xd2\x7d\xae\xcb\x76\xb7\xe3\x93\xd0\xf2\x42\x11',
- > ]}]
- > EOF
- creating http peer for wire protocol version 2
- sending changesetdata command
- response: gen[
- {
- b'totalitems': 4
- },
- {
- b'node': b'3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:'
- },
- {
- b'node': b'u\x92\x91~\x1c>\x82g|\xb0\xa4\xbcq\\\xa2]\xd1-(\xc1'
- },
- {
- b'node': b'\x0b\xb8\xad\x89J\x15\xb1S\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f*K(\xdd'
- },
- {
- b'node': b'\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11'
- }
- ]
-
-Sending root nodes limits what data is sent
-
- $ sendhttpv2peer << EOF
- > command changesetdata
- > revisions eval:[{
- > b'type': b'changesetdagrange',
- > b'roots': [b'\x33\x90\xef\x85\x00\x73\xfb\xc2\xf0\xdf\xff\x22\x44\x34\x2c\x8e\x92\x29\x01\x3a'],
- > b'heads': [
- > b'\x0b\xb8\xad\x89\x4a\x15\xb1\x53\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f\x2a\x4b\x28\xdd',
- > ]}]
- > EOF
- creating http peer for wire protocol version 2
- sending changesetdata command
- response: gen[
- {
- b'totalitems': 2
- },
- {
- b'node': b'u\x92\x91~\x1c>\x82g|\xb0\xa4\xbcq\\\xa2]\xd1-(\xc1'
- },
- {
- b'node': b'\x0b\xb8\xad\x89J\x15\xb1S\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f*K(\xdd'
- }
- ]
-
-Requesting data on a single node by node works
-
- $ sendhttpv2peer << EOF
- > command changesetdata
- > revisions eval:[{
- > b'type': b'changesetexplicit',
- > b'nodes': [b'\x33\x90\xef\x85\x00\x73\xfb\xc2\xf0\xdf\xff\x22\x44\x34\x2c\x8e\x92\x29\x01\x3a']}]
- > EOF
- creating http peer for wire protocol version 2
- sending changesetdata command
- response: gen[
- {
- b'totalitems': 1
- },
- {
- b'node': b'3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:'
- }
- ]
-
-Specifying a noderange and nodes takes union
-
- $ sendhttpv2peer << EOF
- > command changesetdata
- > revisions eval:[
- > {
- > b'type': b'changesetexplicit',
- > b'nodes': [b'\xea\xe5\xf8\x2c\x2e\x62\x23\x68\xd2\x7d\xae\xcb\x76\xb7\xe3\x93\xd0\xf2\x42\x11'],
- > },
- > {
- > b'type': b'changesetdagrange',
- > b'roots': [b'\x75\x92\x91\x7e\x1c\x3e\x82\x67\x7c\xb0\xa4\xbc\x71\x5c\xa2\x5d\xd1\x2d\x28\xc1'],
- > b'heads': [b'\x0b\xb8\xad\x89\x4a\x15\xb1\x53\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f\x2a\x4b\x28\xdd'],
- > }]
- > EOF
- creating http peer for wire protocol version 2
- sending changesetdata command
- response: gen[
- {
- b'totalitems': 2
- },
- {
- b'node': b'\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11'
- },
- {
- b'node': b'\x0b\xb8\xad\x89J\x15\xb1S\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f*K(\xdd'
- }
- ]
-
-nodesdepth of 1 limits to exactly requested nodes
-
- $ sendhttpv2peer << EOF
- > command changesetdata
- > revisions eval:[{
- > b'type': b'changesetexplicitdepth',
- > b'nodes': [b'\xea\xe5\xf8\x2c\x2e\x62\x23\x68\xd2\x7d\xae\xcb\x76\xb7\xe3\x93\xd0\xf2\x42\x11'],
- > b'depth': 1}]
- > EOF
- creating http peer for wire protocol version 2
- sending changesetdata command
- response: gen[
- {
- b'totalitems': 1
- },
- {
- b'node': b'\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11'
- }
- ]
-
-nodesdepth of 2 limits to first ancestor
-
- $ sendhttpv2peer << EOF
- > command changesetdata
- > revisions eval:[{
- > b'type': b'changesetexplicitdepth',
- > b'nodes': [b'\xea\xe5\xf8\x2c\x2e\x62\x23\x68\xd2\x7d\xae\xcb\x76\xb7\xe3\x93\xd0\xf2\x42\x11'],
- > b'depth': 2}]
- > EOF
- creating http peer for wire protocol version 2
- sending changesetdata command
- response: gen[
- {
- b'totalitems': 2
- },
- {
- b'node': b'3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:'
- },
- {
- b'node': b'\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11'
- }
- ]
-
-nodesdepth with multiple nodes
-
- $ sendhttpv2peer << EOF
- > command changesetdata
- > revisions eval:[{
- > b'type': b'changesetexplicitdepth',
- > b'nodes': [b'\xea\xe5\xf8\x2c\x2e\x62\x23\x68\xd2\x7d\xae\xcb\x76\xb7\xe3\x93\xd0\xf2\x42\x11', b'\x0b\xb8\xad\x89\x4a\x15\xb1\x53\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f\x2a\x4b\x28\xdd'],
- > b'depth': 2}]
- > EOF
- creating http peer for wire protocol version 2
- sending changesetdata command
- response: gen[
- {
- b'totalitems': 4
- },
- {
- b'node': b'3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:'
- },
- {
- b'node': b'u\x92\x91~\x1c>\x82g|\xb0\xa4\xbcq\\\xa2]\xd1-(\xc1'
- },
- {
- b'node': b'\x0b\xb8\xad\x89J\x15\xb1S\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f*K(\xdd'
- },
- {
- b'node': b'\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11'
- }
- ]
-
-Parents data is transferred upon request
-
- $ sendhttpv2peer << EOF
- > command changesetdata
- > fields eval:[b'parents']
- > revisions eval:[{
- > b'type': b'changesetexplicit',
- > b'nodes': [
- > b'\xea\xe5\xf8\x2c\x2e\x62\x23\x68\xd2\x7d\xae\xcb\x76\xb7\xe3\x93\xd0\xf2\x42\x11',
- > ]}]
- > EOF
- creating http peer for wire protocol version 2
- sending changesetdata command
- response: gen[
- {
- b'totalitems': 1
- },
- {
- b'node': b'\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11',
- b'parents': [
- b'3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:',
- b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
- ]
- }
- ]
-
-Phase data is transferred upon request
-
- $ sendhttpv2peer << EOF
- > command changesetdata
- > fields eval:[b'phase']
- > revisions eval:[{
- > b'type': b'changesetexplicit',
- > b'nodes': [
- > b'\x0b\xb8\xad\x89\x4a\x15\xb1\x53\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f\x2a\x4b\x28\xdd',
- > ]}]
- > EOF
- creating http peer for wire protocol version 2
- sending changesetdata command
- response: gen[
- {
- b'totalitems': 1
- },
- {
- b'node': b'\x0b\xb8\xad\x89J\x15\xb1S\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f*K(\xdd',
- b'phase': b'public'
- }
- ]
-
-Revision data is transferred upon request
-
- $ sendhttpv2peer << EOF
- > command changesetdata
- > fields eval:[b'revision']
- > revisions eval:[{
- > b'type': b'changesetexplicit',
- > b'nodes': [
- > b'\xea\xe5\xf8\x2c\x2e\x62\x23\x68\xd2\x7d\xae\xcb\x76\xb7\xe3\x93\xd0\xf2\x42\x11',
- > ]}]
- > EOF
- creating http peer for wire protocol version 2
- sending changesetdata command
- response: gen[
- {
- b'totalitems': 1
- },
- {
- b'fieldsfollowing': [
- [
- b'revision',
- 61
- ]
- ],
- b'node': b'\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11'
- },
- b'1b74476799ec8318045db759b1b4bcc9b839d0aa\ntest\n0 0\na\n\ncommit 3'
- ]
-
-Bookmarks key isn't present if no bookmarks data
-
- $ sendhttpv2peer << EOF
- > command changesetdata
- > fields eval:[b'bookmarks']
- > revisions eval:[{
- > b'type': b'changesetdagrange',
- > b'roots': [],
- > b'heads': [
- > b'\x0b\xb8\xad\x89\x4a\x15\xb1\x53\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f\x2a\x4b\x28\xdd',
- > b'\xea\xe5\xf8\x2c\x2e\x62\x23\x68\xd2\x7d\xae\xcb\x76\xb7\xe3\x93\xd0\xf2\x42\x11',
- > ]}]
- > EOF
- creating http peer for wire protocol version 2
- sending changesetdata command
- response: gen[
- {
- b'totalitems': 4
- },
- {
- b'node': b'3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:'
- },
- {
- b'node': b'u\x92\x91~\x1c>\x82g|\xb0\xa4\xbcq\\\xa2]\xd1-(\xc1'
- },
- {
- b'node': b'\x0b\xb8\xad\x89J\x15\xb1S\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f*K(\xdd'
- },
- {
- b'node': b'\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11'
- }
- ]
-
-Bookmarks are sent when requested
-
- $ hg -R ../server bookmark -r 0bb8ad894a15b15380b2a2a5b183e20f2a4b28dd book-1
- $ hg -R ../server bookmark -r eae5f82c2e622368d27daecb76b7e393d0f24211 book-2
- $ hg -R ../server bookmark -r eae5f82c2e622368d27daecb76b7e393d0f24211 book-3
-
- $ sendhttpv2peer << EOF
- > command changesetdata
- > fields eval:[b'bookmarks']
- > revisions eval:[{
- > b'type': b'changesetdagrange',
- > b'roots': [],
- > b'heads': [
- > b'\x0b\xb8\xad\x89\x4a\x15\xb1\x53\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f\x2a\x4b\x28\xdd',
- > b'\xea\xe5\xf8\x2c\x2e\x62\x23\x68\xd2\x7d\xae\xcb\x76\xb7\xe3\x93\xd0\xf2\x42\x11',
- > ]}]
- > EOF
- creating http peer for wire protocol version 2
- sending changesetdata command
- response: gen[
- {
- b'totalitems': 4
- },
- {
- b'node': b'3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:'
- },
- {
- b'node': b'u\x92\x91~\x1c>\x82g|\xb0\xa4\xbcq\\\xa2]\xd1-(\xc1'
- },
- {
- b'bookmarks': [
- b'book-1'
- ],
- b'node': b'\x0b\xb8\xad\x89J\x15\xb1S\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f*K(\xdd'
- },
- {
- b'bookmarks': [
- b'book-2',
- b'book-3'
- ],
- b'node': b'\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11'
- }
- ]
-
-Bookmarks are sent when we make a no-new-revisions request
-
- $ sendhttpv2peer << EOF
- > command changesetdata
- > fields eval:[b'bookmarks', b'revision']
- > revisions eval:[{
- > b'type': b'changesetdagrange',
- > b'roots': [b'\xea\xe5\xf8\x2c\x2e\x62\x23\x68\xd2\x7d\xae\xcb\x76\xb7\xe3\x93\xd0\xf2\x42\x11'],
- > b'heads': [
- > b'\x0b\xb8\xad\x89\x4a\x15\xb1\x53\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f\x2a\x4b\x28\xdd',
- > b'\xea\xe5\xf8\x2c\x2e\x62\x23\x68\xd2\x7d\xae\xcb\x76\xb7\xe3\x93\xd0\xf2\x42\x11',
- > ]}]
- > EOF
- creating http peer for wire protocol version 2
- sending changesetdata command
- response: gen[
- {
- b'totalitems': 2
- },
- {
- b'fieldsfollowing': [
- [
- b'revision',
- 63
- ]
- ],
- b'node': b'u\x92\x91~\x1c>\x82g|\xb0\xa4\xbcq\\\xa2]\xd1-(\xc1'
- },
- b'7f144aea0ba742713887b564d57e9d12f12ff382\ntest\n0 0\na\nb\n\ncommit 1',
- {
- b'bookmarks': [
- b'book-1'
- ],
- b'fieldsfollowing': [
- [
- b'revision',
- 61
- ]
- ],
- b'node': b'\x0b\xb8\xad\x89J\x15\xb1S\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f*K(\xdd'
- },
- b'37f0a2d1c28ffe4b879109a7d1bbf8f07b3c763b\ntest\n0 0\nb\n\ncommit 2',
- {
- b'bookmarks': [
- b'book-2',
- b'book-3'
- ],
- b'node': b'\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11'
- }
- ]
-
-Multiple fields can be transferred
-
- $ sendhttpv2peer << EOF
- > command changesetdata
- > fields eval:[b'parents', b'revision']
- > revisions eval:[{
- > b'type': b'changesetexplicit',
- > b'nodes': [
- > b'\xea\xe5\xf8\x2c\x2e\x62\x23\x68\xd2\x7d\xae\xcb\x76\xb7\xe3\x93\xd0\xf2\x42\x11',
- > ]}]
- > EOF
- creating http peer for wire protocol version 2
- sending changesetdata command
- response: gen[
- {
- b'totalitems': 1
- },
- {
- b'fieldsfollowing': [
- [
- b'revision',
- 61
- ]
- ],
- b'node': b'\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11',
- b'parents': [
- b'3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:',
- b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
- ]
- },
- b'1b74476799ec8318045db759b1b4bcc9b839d0aa\ntest\n0 0\na\n\ncommit 3'
- ]
-
-Base nodes have just their metadata (e.g. phase) transferred
-TODO this doesn't work
-
- $ sendhttpv2peer << EOF
- > command changesetdata
- > fields eval:[b'phase', b'parents', b'revision']
- > revisions eval:[{
- > b'type': b'changesetdagrange',
- > b'roots': [b'\x33\x90\xef\x85\x00\x73\xfb\xc2\xf0\xdf\xff\x22\x44\x34\x2c\x8e\x92\x29\x01\x3a'],
- > b'heads': [
- > b'\x0b\xb8\xad\x89\x4a\x15\xb1\x53\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f\x2a\x4b\x28\xdd',
- > b'\xea\xe5\xf8\x2c\x2e\x62\x23\x68\xd2\x7d\xae\xcb\x76\xb7\xe3\x93\xd0\xf2\x42\x11',
- > ]}]
- > EOF
- creating http peer for wire protocol version 2
- sending changesetdata command
- response: gen[
- {
- b'totalitems': 3
- },
- {
- b'fieldsfollowing': [
- [
- b'revision',
- 63
- ]
- ],
- b'node': b'u\x92\x91~\x1c>\x82g|\xb0\xa4\xbcq\\\xa2]\xd1-(\xc1',
- b'parents': [
- b'3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:',
- b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
- ],
- b'phase': b'public'
- },
- b'7f144aea0ba742713887b564d57e9d12f12ff382\ntest\n0 0\na\nb\n\ncommit 1',
- {
- b'fieldsfollowing': [
- [
- b'revision',
- 61
- ]
- ],
- b'node': b'\x0b\xb8\xad\x89J\x15\xb1S\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f*K(\xdd',
- b'parents': [
- b'u\x92\x91~\x1c>\x82g|\xb0\xa4\xbcq\\\xa2]\xd1-(\xc1',
- b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
- ],
- b'phase': b'public'
- },
- b'37f0a2d1c28ffe4b879109a7d1bbf8f07b3c763b\ntest\n0 0\nb\n\ncommit 2',
- {
- b'fieldsfollowing': [
- [
- b'revision',
- 61
- ]
- ],
- b'node': b'\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11',
- b'parents': [
- b'3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:',
- b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
- ],
- b'phase': b'draft'
- },
- b'1b74476799ec8318045db759b1b4bcc9b839d0aa\ntest\n0 0\na\n\ncommit 3'
- ]
-
- $ cat error.log
--- a/tests/test-wireproto-command-filedata.t Fri Feb 18 12:55:39 2022 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,364 +0,0 @@
- $ . $TESTDIR/wireprotohelpers.sh
-
- $ hg init server
- $ enablehttpv2 server
- $ cd server
- $ cat > a << EOF
- > a0
- > 00000000000000000000000000000000000000
- > 11111111111111111111111111111111111111
- > EOF
- $ echo b0 > b
- $ mkdir -p dir0/child0 dir0/child1 dir1
- $ echo c0 > dir0/c
- $ echo d0 > dir0/d
- $ echo e0 > dir0/child0/e
- $ echo f0 > dir0/child1/f
- $ hg -q commit -A -m 'commit 0'
-
- $ echo a1 >> a
- $ echo d1 > dir0/d
- $ hg commit -m 'commit 1'
- $ echo f1 > dir0/child1/f
- $ hg commit -m 'commit 2'
-
- $ hg -q up -r 0
- $ echo a2 >> a
- $ hg commit -m 'commit 3'
- created new head
-
-Create multiple heads introducing the same changeset
-
- $ hg -q up -r 0
- $ echo foo > dupe-file
- $ hg commit -Am 'dupe 1'
- adding dupe-file
- created new head
- $ hg -q up -r 0
- $ echo foo > dupe-file
- $ hg commit -Am 'dupe 2'
- adding dupe-file
- created new head
-
- $ hg log -G -T '{rev}:{node} {desc}\n'
- @ 5:732c3dd7bee94242de656000e5f458e7ccfe2828 dupe 2
- |
- | o 4:4334f10897d13c3e8beb4b636f7272b4ec2d0322 dupe 1
- |/
- | o 3:5ce944d7fece1252dae06c34422b573c191b9489 commit 3
- |/
- | o 2:b3c27db01410dae01e5485d425b1440078df540c commit 2
- | |
- | o 1:3ef5e551f219ba505481d34d6b0316b017fa3f00 commit 1
- |/
- o 0:91b232a2253ce0638496f67bdfd7a4933fb51b25 commit 0
-
-
- $ hg --debug debugindex a
- rev linkrev nodeid p1 p2
- 0 0 649d149df43d83882523b7fb1e6a3af6f1907b39 0000000000000000000000000000000000000000 0000000000000000000000000000000000000000
- 1 1 0a86321f1379d1a9ecd0579a22977af7a5acaf11 649d149df43d83882523b7fb1e6a3af6f1907b39 0000000000000000000000000000000000000000
- 2 3 7e5801b6d5f03a5a54f3c47b583f7567aad43e5b 649d149df43d83882523b7fb1e6a3af6f1907b39 0000000000000000000000000000000000000000
-
- $ hg --debug debugindex dir0/child0/e
- rev linkrev nodeid p1 p2
- 0 0 bbba6c06b30f443d34ff841bc985c4d0827c6be4 0000000000000000000000000000000000000000 0000000000000000000000000000000000000000
-
- $ hg --debug debugindex dupe-file
- rev linkrev nodeid p1 p2
- 0 4 2ed2a3912a0b24502043eae84ee4b279c18b90dd 0000000000000000000000000000000000000000 0000000000000000000000000000000000000000
-
- $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log
- $ cat hg.pid > $DAEMON_PIDS
-
-Missing arguments is an error
-
- $ sendhttpv2peer << EOF
- > command filedata
- > EOF
- creating http peer for wire protocol version 2
- sending filedata command
- abort: missing required arguments: nodes, path
- [255]
-
- $ sendhttpv2peer << EOF
- > command filedata
- > nodes eval:[]
- > EOF
- creating http peer for wire protocol version 2
- sending filedata command
- abort: missing required arguments: path
- [255]
-
-Unknown node is an error
-
- $ sendhttpv2peer << EOF
- > command filedata
- > nodes eval:[b'\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa']
- > path eval:b'a'
- > EOF
- creating http peer for wire protocol version 2
- sending filedata command
- abort: unknown file node: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
- [255]
-
-Fetching a single revision returns just metadata by default
-
- $ sendhttpv2peer << EOF
- > command filedata
- > nodes eval:[b'\x0a\x86\x32\x1f\x13\x79\xd1\xa9\xec\xd0\x57\x9a\x22\x97\x7a\xf7\xa5\xac\xaf\x11']
- > path eval:b'a'
- > EOF
- creating http peer for wire protocol version 2
- sending filedata command
- response: gen[
- {
- b'totalitems': 1
- },
- {
- b'node': b'\n\x862\x1f\x13y\xd1\xa9\xec\xd0W\x9a"\x97z\xf7\xa5\xac\xaf\x11'
- }
- ]
-
-Requesting parents works
-
- $ sendhttpv2peer << EOF
- > command filedata
- > nodes eval:[b'\x0a\x86\x32\x1f\x13\x79\xd1\xa9\xec\xd0\x57\x9a\x22\x97\x7a\xf7\xa5\xac\xaf\x11']
- > path eval:b'a'
- > fields eval:[b'parents']
- > EOF
- creating http peer for wire protocol version 2
- sending filedata command
- response: gen[
- {
- b'totalitems': 1
- },
- {
- b'node': b'\n\x862\x1f\x13y\xd1\xa9\xec\xd0W\x9a"\x97z\xf7\xa5\xac\xaf\x11',
- b'parents': [
- b'd\x9d\x14\x9d\xf4=\x83\x88%#\xb7\xfb\x1ej:\xf6\xf1\x90{9',
- b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
- ]
- }
- ]
-
-Requesting revision data works
-(haveparents defaults to False, so fulltext is emitted)
-
- $ sendhttpv2peer << EOF
- > command filedata
- > nodes eval:[b'\x0a\x86\x32\x1f\x13\x79\xd1\xa9\xec\xd0\x57\x9a\x22\x97\x7a\xf7\xa5\xac\xaf\x11']
- > path eval:b'a'
- > fields eval:[b'revision']
- > EOF
- creating http peer for wire protocol version 2
- sending filedata command
- response: gen[
- {
- b'totalitems': 1
- },
- {
- b'fieldsfollowing': [
- [
- b'revision',
- 84
- ]
- ],
- b'node': b'\n\x862\x1f\x13y\xd1\xa9\xec\xd0W\x9a"\x97z\xf7\xa5\xac\xaf\x11'
- },
- b'a0\n00000000000000000000000000000000000000\n11111111111111111111111111111111111111\na1\n'
- ]
-
-haveparents=False should be same as above
-
- $ sendhttpv2peer << EOF
- > command filedata
- > nodes eval:[b'\x0a\x86\x32\x1f\x13\x79\xd1\xa9\xec\xd0\x57\x9a\x22\x97\x7a\xf7\xa5\xac\xaf\x11']
- > path eval:b'a'
- > fields eval:[b'revision']
- > haveparents eval:False
- > EOF
- creating http peer for wire protocol version 2
- sending filedata command
- response: gen[
- {
- b'totalitems': 1
- },
- {
- b'fieldsfollowing': [
- [
- b'revision',
- 84
- ]
- ],
- b'node': b'\n\x862\x1f\x13y\xd1\xa9\xec\xd0W\x9a"\x97z\xf7\xa5\xac\xaf\x11'
- },
- b'a0\n00000000000000000000000000000000000000\n11111111111111111111111111111111111111\na1\n'
- ]
-
-haveparents=True should emit a delta
-
- $ sendhttpv2peer << EOF
- > command filedata
- > nodes eval:[b'\x0a\x86\x32\x1f\x13\x79\xd1\xa9\xec\xd0\x57\x9a\x22\x97\x7a\xf7\xa5\xac\xaf\x11']
- > path eval:b'a'
- > fields eval:[b'revision']
- > haveparents eval:True
- > EOF
- creating http peer for wire protocol version 2
- sending filedata command
- response: gen[
- {
- b'totalitems': 1
- },
- {
- b'deltabasenode': b'd\x9d\x14\x9d\xf4=\x83\x88%#\xb7\xfb\x1ej:\xf6\xf1\x90{9',
- b'fieldsfollowing': [
- [
- b'delta',
- 15
- ]
- ],
- b'node': b'\n\x862\x1f\x13y\xd1\xa9\xec\xd0W\x9a"\x97z\xf7\xa5\xac\xaf\x11'
- },
- b'\x00\x00\x00Q\x00\x00\x00Q\x00\x00\x00\x03a1\n'
- ]
-
-Requesting multiple revisions works
-(first revision is a fulltext since haveparents=False by default)
-
- $ sendhttpv2peer << EOF
- > command filedata
- > nodes eval:[b'\x64\x9d\x14\x9d\xf4\x3d\x83\x88\x25\x23\xb7\xfb\x1e\x6a\x3a\xf6\xf1\x90\x7b\x39', b'\x0a\x86\x32\x1f\x13\x79\xd1\xa9\xec\xd0\x57\x9a\x22\x97\x7a\xf7\xa5\xac\xaf\x11']
- > path eval:b'a'
- > fields eval:[b'revision']
- > EOF
- creating http peer for wire protocol version 2
- sending filedata command
- response: gen[
- {
- b'totalitems': 2
- },
- {
- b'fieldsfollowing': [
- [
- b'revision',
- 81
- ]
- ],
- b'node': b'd\x9d\x14\x9d\xf4=\x83\x88%#\xb7\xfb\x1ej:\xf6\xf1\x90{9'
- },
- b'a0\n00000000000000000000000000000000000000\n11111111111111111111111111111111111111\n',
- {
- b'deltabasenode': b'd\x9d\x14\x9d\xf4=\x83\x88%#\xb7\xfb\x1ej:\xf6\xf1\x90{9',
- b'fieldsfollowing': [
- [
- b'delta',
- 15
- ]
- ],
- b'node': b'\n\x862\x1f\x13y\xd1\xa9\xec\xd0W\x9a"\x97z\xf7\xa5\xac\xaf\x11'
- },
- b'\x00\x00\x00Q\x00\x00\x00Q\x00\x00\x00\x03a1\n'
- ]
-
-Revisions are sorted by DAG order, parents first
-
- $ sendhttpv2peer << EOF
- > command filedata
- > nodes eval:[b'\x0a\x86\x32\x1f\x13\x79\xd1\xa9\xec\xd0\x57\x9a\x22\x97\x7a\xf7\xa5\xac\xaf\x11', b'\x64\x9d\x14\x9d\xf4\x3d\x83\x88\x25\x23\xb7\xfb\x1e\x6a\x3a\xf6\xf1\x90\x7b\x39']
- > path eval:b'a'
- > fields eval:[b'revision']
- > EOF
- creating http peer for wire protocol version 2
- sending filedata command
- response: gen[
- {
- b'totalitems': 2
- },
- {
- b'fieldsfollowing': [
- [
- b'revision',
- 81
- ]
- ],
- b'node': b'd\x9d\x14\x9d\xf4=\x83\x88%#\xb7\xfb\x1ej:\xf6\xf1\x90{9'
- },
- b'a0\n00000000000000000000000000000000000000\n11111111111111111111111111111111111111\n',
- {
- b'deltabasenode': b'd\x9d\x14\x9d\xf4=\x83\x88%#\xb7\xfb\x1ej:\xf6\xf1\x90{9',
- b'fieldsfollowing': [
- [
- b'delta',
- 15
- ]
- ],
- b'node': b'\n\x862\x1f\x13y\xd1\xa9\xec\xd0W\x9a"\x97z\xf7\xa5\xac\xaf\x11'
- },
- b'\x00\x00\x00Q\x00\x00\x00Q\x00\x00\x00\x03a1\n'
- ]
-
-Requesting parents and revision data works
-
- $ sendhttpv2peer << EOF
- > command filedata
- > nodes eval:[b'\x7e\x58\x01\xb6\xd5\xf0\x3a\x5a\x54\xf3\xc4\x7b\x58\x3f\x75\x67\xaa\xd4\x3e\x5b']
- > path eval:b'a'
- > fields eval:[b'parents', b'revision']
- > EOF
- creating http peer for wire protocol version 2
- sending filedata command
- response: gen[
- {
- b'totalitems': 1
- },
- {
- b'fieldsfollowing': [
- [
- b'revision',
- 84
- ]
- ],
- b'node': b'~X\x01\xb6\xd5\xf0:ZT\xf3\xc4{X?ug\xaa\xd4>[',
- b'parents': [
- b'd\x9d\x14\x9d\xf4=\x83\x88%#\xb7\xfb\x1ej:\xf6\xf1\x90{9',
- b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
- ]
- },
- b'a0\n00000000000000000000000000000000000000\n11111111111111111111111111111111111111\na2\n'
- ]
-
-Linknode for duplicate revision is the initial revision
-
- $ sendhttpv2peer << EOF
- > command filedata
- > nodes eval:[b'\x2e\xd2\xa3\x91\x2a\x0b\x24\x50\x20\x43\xea\xe8\x4e\xe4\xb2\x79\xc1\x8b\x90\xdd']
- > path eval:b'dupe-file'
- > fields eval:[b'linknode', b'parents', b'revision']
- > EOF
- creating http peer for wire protocol version 2
- sending filedata command
- response: gen[
- {
- b'totalitems': 1
- },
- {
- b'fieldsfollowing': [
- [
- b'revision',
- 4
- ]
- ],
- b'linknode': b'C4\xf1\x08\x97\xd1<>\x8b\xebKcorr\xb4\xec-\x03"',
- b'node': b'.\xd2\xa3\x91*\x0b$P C\xea\xe8N\xe4\xb2y\xc1\x8b\x90\xdd',
- b'parents': [
- b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
- b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
- ]
- },
- b'foo\n'
- ]
-
- $ cat error.log
--- a/tests/test-wireproto-command-filesdata.t Fri Feb 18 12:55:39 2022 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,1298 +0,0 @@
- $ . $TESTDIR/wireprotohelpers.sh
-
- $ hg init server
- $ enablehttpv2 server
- $ cd server
- $ cat > a << EOF
- > a0
- > 00000000000000000000000000000000000000
- > 11111111111111111111111111111111111111
- > EOF
- $ cat > b << EOF
- > b0
- > aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
- > bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
- > EOF
- $ mkdir -p dir0/child0 dir0/child1 dir1
- $ echo c0 > dir0/c
- $ echo d0 > dir0/d
- $ echo e0 > dir0/child0/e
- $ echo f0 > dir0/child1/f
- $ hg -q commit -A -m 'commit 0'
-
- $ echo a1 >> a
- $ echo d1 > dir0/d
- $ echo g0 > g
- $ echo h0 > h
- $ hg -q commit -A -m 'commit 1'
- $ echo f1 > dir0/child1/f
- $ echo i0 > dir0/i
- $ hg -q commit -A -m 'commit 2'
-
- $ hg -q up -r 0
- $ echo a2 >> a
- $ hg commit -m 'commit 3'
- created new head
-
-Create multiple heads introducing the same file nodefile node
-
- $ hg -q up -r 0
- $ echo foo > dupe-file
- $ hg commit -Am 'dupe 1'
- adding dupe-file
- created new head
- $ hg -q up -r 0
- $ echo foo > dupe-file
- $ hg commit -Am 'dupe 2'
- adding dupe-file
- created new head
-
- $ hg log -G -T '{rev}:{node} {desc}\n'
- @ 5:47fc30580911232cb264675b402819deddf6c6f0 dupe 2
- |
- | o 4:b16cce2967c1749ef4f4e3086a806cfbad8a3af7 dupe 1
- |/
- | o 3:476fbf122cd82f6726f0191ff146f67140946abc commit 3
- |/
- | o 2:b91c03cbba3519ab149b6cd0a0afbdb5cf1b5c8a commit 2
- | |
- | o 1:5b0b1a23577e205ea240e39c9704e28d7697cbd8 commit 1
- |/
- o 0:6e875ff18c227659ad6143bb3580c65700734884 commit 0
-
-
- $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log
- $ cat hg.pid > $DAEMON_PIDS
-
-Missing arguments is an error
-
- $ sendhttpv2peer << EOF
- > command filesdata
- > EOF
- creating http peer for wire protocol version 2
- sending filesdata command
- abort: missing required arguments: revisions
- [255]
-
-Bad pattern to pathfilter is rejected
-
- $ sendhttpv2peer << EOF
- > command filesdata
- > revisions eval:[{
- > b'type': b'changesetexplicit',
- > b'nodes': [
- > b'\x5b\x0b\x1a\x23\x57\x7e\x20\x5e\xa2\x40\xe3\x9c\x97\x04\xe2\x8d\x76\x97\xcb\xd8',
- > ]}]
- > pathfilter eval:{b'include': [b'bad:foo']}
- > EOF
- creating http peer for wire protocol version 2
- sending filesdata command
- abort: include pattern must begin with `path:` or `rootfilesin:`; got bad:foo
- [255]
-
- $ sendhttpv2peer << EOF
- > command filesdata
- > revisions eval:[{
- > b'type': b'changesetexplicit',
- > b'nodes': [
- > b'\x5b\x0b\x1a\x23\x57\x7e\x20\x5e\xa2\x40\xe3\x9c\x97\x04\xe2\x8d\x76\x97\xcb\xd8',
- > ]}]
- > pathfilter eval:{b'exclude': [b'glob:foo']}
- > EOF
- creating http peer for wire protocol version 2
- sending filesdata command
- abort: exclude pattern must begin with `path:` or `rootfilesin:`; got glob:foo
- [255]
-
-Fetching a single changeset without parents fetches all files
-
- $ sendhttpv2peer << EOF
- > command filesdata
- > revisions eval:[{
- > b'type': b'changesetexplicit',
- > b'nodes': [
- > b'\x5b\x0b\x1a\x23\x57\x7e\x20\x5e\xa2\x40\xe3\x9c\x97\x04\xe2\x8d\x76\x97\xcb\xd8',
- > ]}]
- > EOF
- creating http peer for wire protocol version 2
- sending filesdata command
- response: gen[
- {
- b'totalitems': 8,
- b'totalpaths': 8
- },
- {
- b'path': b'a',
- b'totalitems': 1
- },
- {
- b'node': b'\n\x862\x1f\x13y\xd1\xa9\xec\xd0W\x9a"\x97z\xf7\xa5\xac\xaf\x11'
- },
- {
- b'path': b'b',
- b'totalitems': 1
- },
- {
- b'node': b'\x88\xbac\xb8\xd8\xc6 :\xc6z\xc9\x98\xac\xd9\x17K\xf7\x05!\xb2'
- },
- {
- b'path': b'dir0/c',
- b'totalitems': 1
- },
- {
- b'node': b'\x91DE4j\x0c\xa0b\x9b\xd4|\xeb]\xfe\x07\xe4\xd4\xcf%\x01'
- },
- {
- b'path': b'dir0/child0/e',
- b'totalitems': 1
- },
- {
- b'node': b'\xbb\xbal\x06\xb3\x0fD=4\xff\x84\x1b\xc9\x85\xc4\xd0\x82|k\xe4'
- },
- {
- b'path': b'dir0/child1/f',
- b'totalitems': 1
- },
- {
- b'node': b'\x12\xfc}\xcdw;Z\n\x92\x9c\xe1\x95"\x80\x83\xc6\xdd\xc9\xce\xc4'
- },
- {
- b'path': b'dir0/d',
- b'totalitems': 1
- },
- {
- b'node': b'\x93\x88)\xad\x01R}2\xba\x06_\x81#6\xfe\xc7\x9d\xdd9G'
- },
- {
- b'path': b'g',
- b'totalitems': 1
- },
- {
- b'node': b'\xde\xca\xba5DFjI\x95r\xe9\x0f\xac\xe6\xfa\x0c!k\xba\x8c'
- },
- {
- b'path': b'h',
- b'totalitems': 1
- },
- {
- b'node': b'\x03A\xfc\x84\x1b\xb5\xb4\xba\x93\xb2mM\xdaa\xf7y6]\xb3K'
- }
- ]
-
-Fetching a single changeset saying parents data is available fetches just new files
-
- $ sendhttpv2peer << EOF
- > command filesdata
- > revisions eval:[{
- > b'type': b'changesetexplicit',
- > b'nodes': [
- > b'\x5b\x0b\x1a\x23\x57\x7e\x20\x5e\xa2\x40\xe3\x9c\x97\x04\xe2\x8d\x76\x97\xcb\xd8',
- > ]}]
- > haveparents eval:True
- > EOF
- creating http peer for wire protocol version 2
- sending filesdata command
- response: gen[
- {
- b'totalitems': 4,
- b'totalpaths': 4
- },
- {
- b'path': b'a',
- b'totalitems': 1
- },
- {
- b'node': b'\n\x862\x1f\x13y\xd1\xa9\xec\xd0W\x9a"\x97z\xf7\xa5\xac\xaf\x11'
- },
- {
- b'path': b'dir0/d',
- b'totalitems': 1
- },
- {
- b'node': b'\x93\x88)\xad\x01R}2\xba\x06_\x81#6\xfe\xc7\x9d\xdd9G'
- },
- {
- b'path': b'g',
- b'totalitems': 1
- },
- {
- b'node': b'\xde\xca\xba5DFjI\x95r\xe9\x0f\xac\xe6\xfa\x0c!k\xba\x8c'
- },
- {
- b'path': b'h',
- b'totalitems': 1
- },
- {
- b'node': b'\x03A\xfc\x84\x1b\xb5\xb4\xba\x93\xb2mM\xdaa\xf7y6]\xb3K'
- }
- ]
-
-A path filter for a sub-directory is honored
-
- $ sendhttpv2peer << EOF
- > command filesdata
- > revisions eval:[{
- > b'type': b'changesetexplicit',
- > b'nodes': [
- > b'\x5b\x0b\x1a\x23\x57\x7e\x20\x5e\xa2\x40\xe3\x9c\x97\x04\xe2\x8d\x76\x97\xcb\xd8',
- > ]}]
- > haveparents eval:True
- > pathfilter eval:{b'include': [b'path:dir0']}
- > EOF
- creating http peer for wire protocol version 2
- sending filesdata command
- response: gen[
- {
- b'totalitems': 1,
- b'totalpaths': 1
- },
- {
- b'path': b'dir0/d',
- b'totalitems': 1
- },
- {
- b'node': b'\x93\x88)\xad\x01R}2\xba\x06_\x81#6\xfe\xc7\x9d\xdd9G'
- }
- ]
-
- $ sendhttpv2peer << EOF
- > command filesdata
- > revisions eval:[{
- > b'type': b'changesetexplicit',
- > b'nodes': [
- > b'\x5b\x0b\x1a\x23\x57\x7e\x20\x5e\xa2\x40\xe3\x9c\x97\x04\xe2\x8d\x76\x97\xcb\xd8',
- > ]}]
- > haveparents eval:True
- > pathfilter eval:{b'exclude': [b'path:a', b'path:g']}
- > EOF
- creating http peer for wire protocol version 2
- sending filesdata command
- response: gen[
- {
- b'totalitems': 2,
- b'totalpaths': 2
- },
- {
- b'path': b'dir0/d',
- b'totalitems': 1
- },
- {
- b'node': b'\x93\x88)\xad\x01R}2\xba\x06_\x81#6\xfe\xc7\x9d\xdd9G'
- },
- {
- b'path': b'h',
- b'totalitems': 1
- },
- {
- b'node': b'\x03A\xfc\x84\x1b\xb5\xb4\xba\x93\xb2mM\xdaa\xf7y6]\xb3K'
- }
- ]
-
-Requesting multiple changeset nodes without haveparents sends all data for both
-
- $ sendhttpv2peer << EOF
- > command filesdata
- > revisions eval:[{
- > b'type': b'changesetexplicit',
- > b'nodes': [
- > b'\x5b\x0b\x1a\x23\x57\x7e\x20\x5e\xa2\x40\xe3\x9c\x97\x04\xe2\x8d\x76\x97\xcb\xd8',
- > b'\xb9\x1c\x03\xcb\xba\x35\x19\xab\x14\x9b\x6c\xd0\xa0\xaf\xbd\xb5\xcf\x1b\x5c\x8a',
- > ]}]
- > EOF
- creating http peer for wire protocol version 2
- sending filesdata command
- response: gen[
- {
- b'totalitems': 10,
- b'totalpaths': 9
- },
- {
- b'path': b'a',
- b'totalitems': 1
- },
- {
- b'node': b'\n\x862\x1f\x13y\xd1\xa9\xec\xd0W\x9a"\x97z\xf7\xa5\xac\xaf\x11'
- },
- {
- b'path': b'b',
- b'totalitems': 1
- },
- {
- b'node': b'\x88\xbac\xb8\xd8\xc6 :\xc6z\xc9\x98\xac\xd9\x17K\xf7\x05!\xb2'
- },
- {
- b'path': b'dir0/c',
- b'totalitems': 1
- },
- {
- b'node': b'\x91DE4j\x0c\xa0b\x9b\xd4|\xeb]\xfe\x07\xe4\xd4\xcf%\x01'
- },
- {
- b'path': b'dir0/child0/e',
- b'totalitems': 1
- },
- {
- b'node': b'\xbb\xbal\x06\xb3\x0fD=4\xff\x84\x1b\xc9\x85\xc4\xd0\x82|k\xe4'
- },
- {
- b'path': b'dir0/child1/f',
- b'totalitems': 2
- },
- {
- b'node': b'\x12\xfc}\xcdw;Z\n\x92\x9c\xe1\x95"\x80\x83\xc6\xdd\xc9\xce\xc4'
- },
- {
- b'node': b'(\xc7v\xae\x08\xd0\xd5^\xb4\x06H\xb4\x01\xb9\x0f\xf5DH4\x8e'
- },
- {
- b'path': b'dir0/d',
- b'totalitems': 1
- },
- {
- b'node': b'\x93\x88)\xad\x01R}2\xba\x06_\x81#6\xfe\xc7\x9d\xdd9G'
- },
- {
- b'path': b'dir0/i',
- b'totalitems': 1
- },
- {
- b'node': b'\xd7t\xb5\x80Jq\xfd1\xe1\xae\x05\xea\x8e2\xdd\x9b\xa3\xd8S\xd7'
- },
- {
- b'path': b'g',
- b'totalitems': 1
- },
- {
- b'node': b'\xde\xca\xba5DFjI\x95r\xe9\x0f\xac\xe6\xfa\x0c!k\xba\x8c'
- },
- {
- b'path': b'h',
- b'totalitems': 1
- },
- {
- b'node': b'\x03A\xfc\x84\x1b\xb5\xb4\xba\x93\xb2mM\xdaa\xf7y6]\xb3K'
- }
- ]
-
-Requesting multiple changeset nodes with haveparents sends incremental data for both
-
- $ sendhttpv2peer << EOF
- > command filesdata
- > revisions eval:[{
- > b'type': b'changesetexplicit',
- > b'nodes': [
- > b'\x5b\x0b\x1a\x23\x57\x7e\x20\x5e\xa2\x40\xe3\x9c\x97\x04\xe2\x8d\x76\x97\xcb\xd8',
- > b'\xb9\x1c\x03\xcb\xba\x35\x19\xab\x14\x9b\x6c\xd0\xa0\xaf\xbd\xb5\xcf\x1b\x5c\x8a',
- > ]}]
- > haveparents eval:True
- > EOF
- creating http peer for wire protocol version 2
- sending filesdata command
- response: gen[
- {
- b'totalitems': 6,
- b'totalpaths': 6
- },
- {
- b'path': b'a',
- b'totalitems': 1
- },
- {
- b'node': b'\n\x862\x1f\x13y\xd1\xa9\xec\xd0W\x9a"\x97z\xf7\xa5\xac\xaf\x11'
- },
- {
- b'path': b'dir0/child1/f',
- b'totalitems': 1
- },
- {
- b'node': b'(\xc7v\xae\x08\xd0\xd5^\xb4\x06H\xb4\x01\xb9\x0f\xf5DH4\x8e'
- },
- {
- b'path': b'dir0/d',
- b'totalitems': 1
- },
- {
- b'node': b'\x93\x88)\xad\x01R}2\xba\x06_\x81#6\xfe\xc7\x9d\xdd9G'
- },
- {
- b'path': b'dir0/i',
- b'totalitems': 1
- },
- {
- b'node': b'\xd7t\xb5\x80Jq\xfd1\xe1\xae\x05\xea\x8e2\xdd\x9b\xa3\xd8S\xd7'
- },
- {
- b'path': b'g',
- b'totalitems': 1
- },
- {
- b'node': b'\xde\xca\xba5DFjI\x95r\xe9\x0f\xac\xe6\xfa\x0c!k\xba\x8c'
- },
- {
- b'path': b'h',
- b'totalitems': 1
- },
- {
- b'node': b'\x03A\xfc\x84\x1b\xb5\xb4\xba\x93\xb2mM\xdaa\xf7y6]\xb3K'
- }
- ]
-
-Requesting parents works
-
- $ sendhttpv2peer << EOF
- > command filesdata
- > revisions eval:[{
- > b'type': b'changesetexplicit',
- > b'nodes': [
- > b'\x5b\x0b\x1a\x23\x57\x7e\x20\x5e\xa2\x40\xe3\x9c\x97\x04\xe2\x8d\x76\x97\xcb\xd8',
- > ]}]
- > fields eval:[b'parents']
- > EOF
- creating http peer for wire protocol version 2
- sending filesdata command
- response: gen[
- {
- b'totalitems': 8,
- b'totalpaths': 8
- },
- {
- b'path': b'a',
- b'totalitems': 1
- },
- {
- b'node': b'\n\x862\x1f\x13y\xd1\xa9\xec\xd0W\x9a"\x97z\xf7\xa5\xac\xaf\x11',
- b'parents': [
- b'd\x9d\x14\x9d\xf4=\x83\x88%#\xb7\xfb\x1ej:\xf6\xf1\x90{9',
- b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
- ]
- },
- {
- b'path': b'b',
- b'totalitems': 1
- },
- {
- b'node': b'\x88\xbac\xb8\xd8\xc6 :\xc6z\xc9\x98\xac\xd9\x17K\xf7\x05!\xb2',
- b'parents': [
- b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
- b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
- ]
- },
- {
- b'path': b'dir0/c',
- b'totalitems': 1
- },
- {
- b'node': b'\x91DE4j\x0c\xa0b\x9b\xd4|\xeb]\xfe\x07\xe4\xd4\xcf%\x01',
- b'parents': [
- b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
- b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
- ]
- },
- {
- b'path': b'dir0/child0/e',
- b'totalitems': 1
- },
- {
- b'node': b'\xbb\xbal\x06\xb3\x0fD=4\xff\x84\x1b\xc9\x85\xc4\xd0\x82|k\xe4',
- b'parents': [
- b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
- b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
- ]
- },
- {
- b'path': b'dir0/child1/f',
- b'totalitems': 1
- },
- {
- b'node': b'\x12\xfc}\xcdw;Z\n\x92\x9c\xe1\x95"\x80\x83\xc6\xdd\xc9\xce\xc4',
- b'parents': [
- b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
- b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
- ]
- },
- {
- b'path': b'dir0/d',
- b'totalitems': 1
- },
- {
- b'node': b'\x93\x88)\xad\x01R}2\xba\x06_\x81#6\xfe\xc7\x9d\xdd9G',
- b'parents': [
- b'S\x82\x06\xdc\x97\x1eR\x15@\xd6\x84:\xbf\xe6\xd1`2\xf6\xd4&',
- b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
- ]
- },
- {
- b'path': b'g',
- b'totalitems': 1
- },
- {
- b'node': b'\xde\xca\xba5DFjI\x95r\xe9\x0f\xac\xe6\xfa\x0c!k\xba\x8c',
- b'parents': [
- b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
- b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
- ]
- },
- {
- b'path': b'h',
- b'totalitems': 1
- },
- {
- b'node': b'\x03A\xfc\x84\x1b\xb5\xb4\xba\x93\xb2mM\xdaa\xf7y6]\xb3K',
- b'parents': [
- b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
- b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
- ]
- }
- ]
-
-Requesting revision data works
-(haveparents defaults to False, so fulltext is emitted)
-
- $ sendhttpv2peer << EOF
- > command filesdata
- > revisions eval:[{
- > b'type': b'changesetexplicit',
- > b'nodes': [
- > b'\x5b\x0b\x1a\x23\x57\x7e\x20\x5e\xa2\x40\xe3\x9c\x97\x04\xe2\x8d\x76\x97\xcb\xd8',
- > ]}]
- > fields eval:[b'revision']
- > EOF
- creating http peer for wire protocol version 2
- sending filesdata command
- response: gen[
- {
- b'totalitems': 8,
- b'totalpaths': 8
- },
- {
- b'path': b'a',
- b'totalitems': 1
- },
- {
- b'fieldsfollowing': [
- [
- b'revision',
- 84
- ]
- ],
- b'node': b'\n\x862\x1f\x13y\xd1\xa9\xec\xd0W\x9a"\x97z\xf7\xa5\xac\xaf\x11'
- },
- b'a0\n00000000000000000000000000000000000000\n11111111111111111111111111111111111111\na1\n',
- {
- b'path': b'b',
- b'totalitems': 1
- },
- {
- b'fieldsfollowing': [
- [
- b'revision',
- 81
- ]
- ],
- b'node': b'\x88\xbac\xb8\xd8\xc6 :\xc6z\xc9\x98\xac\xd9\x17K\xf7\x05!\xb2'
- },
- b'b0\naaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\nbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n',
- {
- b'path': b'dir0/c',
- b'totalitems': 1
- },
- {
- b'fieldsfollowing': [
- [
- b'revision',
- 3
- ]
- ],
- b'node': b'\x91DE4j\x0c\xa0b\x9b\xd4|\xeb]\xfe\x07\xe4\xd4\xcf%\x01'
- },
- b'c0\n',
- {
- b'path': b'dir0/child0/e',
- b'totalitems': 1
- },
- {
- b'fieldsfollowing': [
- [
- b'revision',
- 3
- ]
- ],
- b'node': b'\xbb\xbal\x06\xb3\x0fD=4\xff\x84\x1b\xc9\x85\xc4\xd0\x82|k\xe4'
- },
- b'e0\n',
- {
- b'path': b'dir0/child1/f',
- b'totalitems': 1
- },
- {
- b'fieldsfollowing': [
- [
- b'revision',
- 3
- ]
- ],
- b'node': b'\x12\xfc}\xcdw;Z\n\x92\x9c\xe1\x95"\x80\x83\xc6\xdd\xc9\xce\xc4'
- },
- b'f0\n',
- {
- b'path': b'dir0/d',
- b'totalitems': 1
- },
- {
- b'fieldsfollowing': [
- [
- b'revision',
- 3
- ]
- ],
- b'node': b'\x93\x88)\xad\x01R}2\xba\x06_\x81#6\xfe\xc7\x9d\xdd9G'
- },
- b'd1\n',
- {
- b'path': b'g',
- b'totalitems': 1
- },
- {
- b'fieldsfollowing': [
- [
- b'revision',
- 3
- ]
- ],
- b'node': b'\xde\xca\xba5DFjI\x95r\xe9\x0f\xac\xe6\xfa\x0c!k\xba\x8c'
- },
- b'g0\n',
- {
- b'path': b'h',
- b'totalitems': 1
- },
- {
- b'fieldsfollowing': [
- [
- b'revision',
- 3
- ]
- ],
- b'node': b'\x03A\xfc\x84\x1b\xb5\xb4\xba\x93\xb2mM\xdaa\xf7y6]\xb3K'
- },
- b'h0\n'
- ]
-
-haveparents=False should be same as above
-
- $ sendhttpv2peer << EOF
- > command filesdata
- > revisions eval:[{
- > b'type': b'changesetexplicit',
- > b'nodes': [
- > b'\x5b\x0b\x1a\x23\x57\x7e\x20\x5e\xa2\x40\xe3\x9c\x97\x04\xe2\x8d\x76\x97\xcb\xd8',
- > ]}]
- > fields eval:[b'revision']
- > haveparents eval:False
- > EOF
- creating http peer for wire protocol version 2
- sending filesdata command
- response: gen[
- {
- b'totalitems': 8,
- b'totalpaths': 8
- },
- {
- b'path': b'a',
- b'totalitems': 1
- },
- {
- b'fieldsfollowing': [
- [
- b'revision',
- 84
- ]
- ],
- b'node': b'\n\x862\x1f\x13y\xd1\xa9\xec\xd0W\x9a"\x97z\xf7\xa5\xac\xaf\x11'
- },
- b'a0\n00000000000000000000000000000000000000\n11111111111111111111111111111111111111\na1\n',
- {
- b'path': b'b',
- b'totalitems': 1
- },
- {
- b'fieldsfollowing': [
- [
- b'revision',
- 81
- ]
- ],
- b'node': b'\x88\xbac\xb8\xd8\xc6 :\xc6z\xc9\x98\xac\xd9\x17K\xf7\x05!\xb2'
- },
- b'b0\naaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\nbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n',
- {
- b'path': b'dir0/c',
- b'totalitems': 1
- },
- {
- b'fieldsfollowing': [
- [
- b'revision',
- 3
- ]
- ],
- b'node': b'\x91DE4j\x0c\xa0b\x9b\xd4|\xeb]\xfe\x07\xe4\xd4\xcf%\x01'
- },
- b'c0\n',
- {
- b'path': b'dir0/child0/e',
- b'totalitems': 1
- },
- {
- b'fieldsfollowing': [
- [
- b'revision',
- 3
- ]
- ],
- b'node': b'\xbb\xbal\x06\xb3\x0fD=4\xff\x84\x1b\xc9\x85\xc4\xd0\x82|k\xe4'
- },
- b'e0\n',
- {
- b'path': b'dir0/child1/f',
- b'totalitems': 1
- },
- {
- b'fieldsfollowing': [
- [
- b'revision',
- 3
- ]
- ],
- b'node': b'\x12\xfc}\xcdw;Z\n\x92\x9c\xe1\x95"\x80\x83\xc6\xdd\xc9\xce\xc4'
- },
- b'f0\n',
- {
- b'path': b'dir0/d',
- b'totalitems': 1
- },
- {
- b'fieldsfollowing': [
- [
- b'revision',
- 3
- ]
- ],
- b'node': b'\x93\x88)\xad\x01R}2\xba\x06_\x81#6\xfe\xc7\x9d\xdd9G'
- },
- b'd1\n',
- {
- b'path': b'g',
- b'totalitems': 1
- },
- {
- b'fieldsfollowing': [
- [
- b'revision',
- 3
- ]
- ],
- b'node': b'\xde\xca\xba5DFjI\x95r\xe9\x0f\xac\xe6\xfa\x0c!k\xba\x8c'
- },
- b'g0\n',
- {
- b'path': b'h',
- b'totalitems': 1
- },
- {
- b'fieldsfollowing': [
- [
- b'revision',
- 3
- ]
- ],
- b'node': b'\x03A\xfc\x84\x1b\xb5\xb4\xba\x93\xb2mM\xdaa\xf7y6]\xb3K'
- },
- b'h0\n'
- ]
-
-haveparents=True should emit a delta
-
- $ sendhttpv2peer << EOF
- > command filesdata
- > revisions eval:[{
- > b'type': b'changesetexplicit',
- > b'nodes': [
- > b'\x5b\x0b\x1a\x23\x57\x7e\x20\x5e\xa2\x40\xe3\x9c\x97\x04\xe2\x8d\x76\x97\xcb\xd8',
- > ]}]
- > fields eval:[b'revision']
- > haveparents eval:True
- > EOF
- creating http peer for wire protocol version 2
- sending filesdata command
- response: gen[
- {
- b'totalitems': 4,
- b'totalpaths': 4
- },
- {
- b'path': b'a',
- b'totalitems': 1
- },
- {
- b'deltabasenode': b'd\x9d\x14\x9d\xf4=\x83\x88%#\xb7\xfb\x1ej:\xf6\xf1\x90{9',
- b'fieldsfollowing': [
- [
- b'delta',
- 15
- ]
- ],
- b'node': b'\n\x862\x1f\x13y\xd1\xa9\xec\xd0W\x9a"\x97z\xf7\xa5\xac\xaf\x11'
- },
- b'\x00\x00\x00Q\x00\x00\x00Q\x00\x00\x00\x03a1\n',
- {
- b'path': b'dir0/d',
- b'totalitems': 1
- },
- {
- b'deltabasenode': b'S\x82\x06\xdc\x97\x1eR\x15@\xd6\x84:\xbf\xe6\xd1`2\xf6\xd4&',
- b'fieldsfollowing': [
- [
- b'delta',
- 15
- ]
- ],
- b'node': b'\x93\x88)\xad\x01R}2\xba\x06_\x81#6\xfe\xc7\x9d\xdd9G'
- },
- b'\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x03d1\n',
- {
- b'path': b'g',
- b'totalitems': 1
- },
- {
- b'fieldsfollowing': [
- [
- b'revision',
- 3
- ]
- ],
- b'node': b'\xde\xca\xba5DFjI\x95r\xe9\x0f\xac\xe6\xfa\x0c!k\xba\x8c'
- },
- b'g0\n',
- {
- b'path': b'h',
- b'totalitems': 1
- },
- {
- b'fieldsfollowing': [
- [
- b'revision',
- 3
- ]
- ],
- b'node': b'\x03A\xfc\x84\x1b\xb5\xb4\xba\x93\xb2mM\xdaa\xf7y6]\xb3K'
- },
- b'h0\n'
- ]
-
-Requesting multiple revisions works
-(first revision is a fulltext since haveparents=False by default)
-
- $ sendhttpv2peer << EOF
- > command filesdata
- > revisions eval:[{
- > b'type': b'changesetexplicit',
- > b'nodes': [
- > b'\x6e\x87\x5f\xf1\x8c\x22\x76\x59\xad\x61\x43\xbb\x35\x80\xc6\x57\x00\x73\x48\x84',
- > b'\x5b\x0b\x1a\x23\x57\x7e\x20\x5e\xa2\x40\xe3\x9c\x97\x04\xe2\x8d\x76\x97\xcb\xd8',
- > b'\xb9\x1c\x03\xcb\xba\x35\x19\xab\x14\x9b\x6c\xd0\xa0\xaf\xbd\xb5\xcf\x1b\x5c\x8a',
- > ]}]
- > fields eval:[b'revision']
- > EOF
- creating http peer for wire protocol version 2
- sending filesdata command
- response: gen[
- {
- b'totalitems': 12,
- b'totalpaths': 9
- },
- {
- b'path': b'a',
- b'totalitems': 2
- },
- {
- b'fieldsfollowing': [
- [
- b'revision',
- 81
- ]
- ],
- b'node': b'd\x9d\x14\x9d\xf4=\x83\x88%#\xb7\xfb\x1ej:\xf6\xf1\x90{9'
- },
- b'a0\n00000000000000000000000000000000000000\n11111111111111111111111111111111111111\n',
- {
- b'deltabasenode': b'd\x9d\x14\x9d\xf4=\x83\x88%#\xb7\xfb\x1ej:\xf6\xf1\x90{9',
- b'fieldsfollowing': [
- [
- b'delta',
- 15
- ]
- ],
- b'node': b'\n\x862\x1f\x13y\xd1\xa9\xec\xd0W\x9a"\x97z\xf7\xa5\xac\xaf\x11'
- },
- b'\x00\x00\x00Q\x00\x00\x00Q\x00\x00\x00\x03a1\n',
- {
- b'path': b'b',
- b'totalitems': 1
- },
- {
- b'fieldsfollowing': [
- [
- b'revision',
- 81
- ]
- ],
- b'node': b'\x88\xbac\xb8\xd8\xc6 :\xc6z\xc9\x98\xac\xd9\x17K\xf7\x05!\xb2'
- },
- b'b0\naaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\nbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n',
- {
- b'path': b'dir0/c',
- b'totalitems': 1
- },
- {
- b'fieldsfollowing': [
- [
- b'revision',
- 3
- ]
- ],
- b'node': b'\x91DE4j\x0c\xa0b\x9b\xd4|\xeb]\xfe\x07\xe4\xd4\xcf%\x01'
- },
- b'c0\n',
- {
- b'path': b'dir0/child0/e',
- b'totalitems': 1
- },
- {
- b'fieldsfollowing': [
- [
- b'revision',
- 3
- ]
- ],
- b'node': b'\xbb\xbal\x06\xb3\x0fD=4\xff\x84\x1b\xc9\x85\xc4\xd0\x82|k\xe4'
- },
- b'e0\n',
- {
- b'path': b'dir0/child1/f',
- b'totalitems': 2
- },
- {
- b'fieldsfollowing': [
- [
- b'revision',
- 3
- ]
- ],
- b'node': b'\x12\xfc}\xcdw;Z\n\x92\x9c\xe1\x95"\x80\x83\xc6\xdd\xc9\xce\xc4'
- },
- b'f0\n',
- {
- b'deltabasenode': b'\x12\xfc}\xcdw;Z\n\x92\x9c\xe1\x95"\x80\x83\xc6\xdd\xc9\xce\xc4',
- b'fieldsfollowing': [
- [
- b'delta',
- 15
- ]
- ],
- b'node': b'(\xc7v\xae\x08\xd0\xd5^\xb4\x06H\xb4\x01\xb9\x0f\xf5DH4\x8e'
- },
- b'\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x03f1\n',
- {
- b'path': b'dir0/d',
- b'totalitems': 2
- },
- {
- b'fieldsfollowing': [
- [
- b'revision',
- 3
- ]
- ],
- b'node': b'S\x82\x06\xdc\x97\x1eR\x15@\xd6\x84:\xbf\xe6\xd1`2\xf6\xd4&'
- },
- b'd0\n',
- {
- b'deltabasenode': b'S\x82\x06\xdc\x97\x1eR\x15@\xd6\x84:\xbf\xe6\xd1`2\xf6\xd4&',
- b'fieldsfollowing': [
- [
- b'delta',
- 15
- ]
- ],
- b'node': b'\x93\x88)\xad\x01R}2\xba\x06_\x81#6\xfe\xc7\x9d\xdd9G'
- },
- b'\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x03d1\n',
- {
- b'path': b'dir0/i',
- b'totalitems': 1
- },
- {
- b'fieldsfollowing': [
- [
- b'revision',
- 3
- ]
- ],
- b'node': b'\xd7t\xb5\x80Jq\xfd1\xe1\xae\x05\xea\x8e2\xdd\x9b\xa3\xd8S\xd7'
- },
- b'i0\n',
- {
- b'path': b'g',
- b'totalitems': 1
- },
- {
- b'fieldsfollowing': [
- [
- b'revision',
- 3
- ]
- ],
- b'node': b'\xde\xca\xba5DFjI\x95r\xe9\x0f\xac\xe6\xfa\x0c!k\xba\x8c'
- },
- b'g0\n',
- {
- b'path': b'h',
- b'totalitems': 1
- },
- {
- b'fieldsfollowing': [
- [
- b'revision',
- 3
- ]
- ],
- b'node': b'\x03A\xfc\x84\x1b\xb5\xb4\xba\x93\xb2mM\xdaa\xf7y6]\xb3K'
- },
- b'h0\n'
- ]
-
-Requesting linknode field works
-
- $ sendhttpv2peer << EOF
- > command filesdata
- > revisions eval:[{
- > b'type': b'changesetexplicit',
- > b'nodes': [
- > b'\x6e\x87\x5f\xf1\x8c\x22\x76\x59\xad\x61\x43\xbb\x35\x80\xc6\x57\x00\x73\x48\x84',
- > b'\x5b\x0b\x1a\x23\x57\x7e\x20\x5e\xa2\x40\xe3\x9c\x97\x04\xe2\x8d\x76\x97\xcb\xd8',
- > b'\xb9\x1c\x03\xcb\xba\x35\x19\xab\x14\x9b\x6c\xd0\xa0\xaf\xbd\xb5\xcf\x1b\x5c\x8a',
- > ]}]
- > fields eval:[b'linknode']
- > EOF
- creating http peer for wire protocol version 2
- sending filesdata command
- response: gen[
- {
- b'totalitems': 12,
- b'totalpaths': 9
- },
- {
- b'path': b'a',
- b'totalitems': 2
- },
- {
- b'linknode': b'n\x87_\xf1\x8c"vY\xadaC\xbb5\x80\xc6W\x00sH\x84',
- b'node': b'd\x9d\x14\x9d\xf4=\x83\x88%#\xb7\xfb\x1ej:\xf6\xf1\x90{9'
- },
- {
- b'linknode': b'[\x0b\x1a#W~ ^\xa2@\xe3\x9c\x97\x04\xe2\x8dv\x97\xcb\xd8',
- b'node': b'\n\x862\x1f\x13y\xd1\xa9\xec\xd0W\x9a"\x97z\xf7\xa5\xac\xaf\x11'
- },
- {
- b'path': b'b',
- b'totalitems': 1
- },
- {
- b'linknode': b'n\x87_\xf1\x8c"vY\xadaC\xbb5\x80\xc6W\x00sH\x84',
- b'node': b'\x88\xbac\xb8\xd8\xc6 :\xc6z\xc9\x98\xac\xd9\x17K\xf7\x05!\xb2'
- },
- {
- b'path': b'dir0/c',
- b'totalitems': 1
- },
- {
- b'linknode': b'n\x87_\xf1\x8c"vY\xadaC\xbb5\x80\xc6W\x00sH\x84',
- b'node': b'\x91DE4j\x0c\xa0b\x9b\xd4|\xeb]\xfe\x07\xe4\xd4\xcf%\x01'
- },
- {
- b'path': b'dir0/child0/e',
- b'totalitems': 1
- },
- {
- b'linknode': b'n\x87_\xf1\x8c"vY\xadaC\xbb5\x80\xc6W\x00sH\x84',
- b'node': b'\xbb\xbal\x06\xb3\x0fD=4\xff\x84\x1b\xc9\x85\xc4\xd0\x82|k\xe4'
- },
- {
- b'path': b'dir0/child1/f',
- b'totalitems': 2
- },
- {
- b'linknode': b'n\x87_\xf1\x8c"vY\xadaC\xbb5\x80\xc6W\x00sH\x84',
- b'node': b'\x12\xfc}\xcdw;Z\n\x92\x9c\xe1\x95"\x80\x83\xc6\xdd\xc9\xce\xc4'
- },
- {
- b'linknode': b'\xb9\x1c\x03\xcb\xba5\x19\xab\x14\x9bl\xd0\xa0\xaf\xbd\xb5\xcf\x1b\\\x8a',
- b'node': b'(\xc7v\xae\x08\xd0\xd5^\xb4\x06H\xb4\x01\xb9\x0f\xf5DH4\x8e'
- },
- {
- b'path': b'dir0/d',
- b'totalitems': 2
- },
- {
- b'linknode': b'n\x87_\xf1\x8c"vY\xadaC\xbb5\x80\xc6W\x00sH\x84',
- b'node': b'S\x82\x06\xdc\x97\x1eR\x15@\xd6\x84:\xbf\xe6\xd1`2\xf6\xd4&'
- },
- {
- b'linknode': b'[\x0b\x1a#W~ ^\xa2@\xe3\x9c\x97\x04\xe2\x8dv\x97\xcb\xd8',
- b'node': b'\x93\x88)\xad\x01R}2\xba\x06_\x81#6\xfe\xc7\x9d\xdd9G'
- },
- {
- b'path': b'dir0/i',
- b'totalitems': 1
- },
- {
- b'linknode': b'\xb9\x1c\x03\xcb\xba5\x19\xab\x14\x9bl\xd0\xa0\xaf\xbd\xb5\xcf\x1b\\\x8a',
- b'node': b'\xd7t\xb5\x80Jq\xfd1\xe1\xae\x05\xea\x8e2\xdd\x9b\xa3\xd8S\xd7'
- },
- {
- b'path': b'g',
- b'totalitems': 1
- },
- {
- b'linknode': b'[\x0b\x1a#W~ ^\xa2@\xe3\x9c\x97\x04\xe2\x8dv\x97\xcb\xd8',
- b'node': b'\xde\xca\xba5DFjI\x95r\xe9\x0f\xac\xe6\xfa\x0c!k\xba\x8c'
- },
- {
- b'path': b'h',
- b'totalitems': 1
- },
- {
- b'linknode': b'[\x0b\x1a#W~ ^\xa2@\xe3\x9c\x97\x04\xe2\x8dv\x97\xcb\xd8',
- b'node': b'\x03A\xfc\x84\x1b\xb5\xb4\xba\x93\xb2mM\xdaa\xf7y6]\xb3K'
- }
- ]
-
-Test behavior where a file node is introduced in 2 DAG heads
-
-Request for changeset introducing filenode returns linknode as self
-
- $ sendhttpv2peer << EOF
- > command filesdata
- > revisions eval:[{
- > b'type': b'changesetexplicit',
- > b'nodes': [
- > b'\xb1\x6c\xce\x29\x67\xc1\x74\x9e\xf4\xf4\xe3\x08\x6a\x80\x6c\xfb\xad\x8a\x3a\xf7',
- > ]}]
- > fields eval:[b'linknode']
- > pathfilter eval:{b'include': [b'path:dupe-file']}
- > EOF
- creating http peer for wire protocol version 2
- sending filesdata command
- response: gen[
- {
- b'totalitems': 1,
- b'totalpaths': 1
- },
- {
- b'path': b'dupe-file',
- b'totalitems': 1
- },
- {
- b'linknode': b'\xb1l\xce)g\xc1t\x9e\xf4\xf4\xe3\x08j\x80l\xfb\xad\x8a:\xf7',
- b'node': b'.\xd2\xa3\x91*\x0b$P C\xea\xe8N\xe4\xb2y\xc1\x8b\x90\xdd'
- }
- ]
-
- $ sendhttpv2peer << EOF
- > command filesdata
- > revisions eval:[{
- > b'type': b'changesetexplicit',
- > b'nodes': [
- > b'\xb1\x6c\xce\x29\x67\xc1\x74\x9e\xf4\xf4\xe3\x08\x6a\x80\x6c\xfb\xad\x8a\x3a\xf7',
- > ]}]
- > fields eval:[b'linknode']
- > haveparents eval:True
- > pathfilter eval:{b'include': [b'path:dupe-file']}
- > EOF
- creating http peer for wire protocol version 2
- sending filesdata command
- response: gen[
- {
- b'totalitems': 1,
- b'totalpaths': 1
- },
- {
- b'path': b'dupe-file',
- b'totalitems': 1
- },
- {
- b'linknode': b'\xb1l\xce)g\xc1t\x9e\xf4\xf4\xe3\x08j\x80l\xfb\xad\x8a:\xf7',
- b'node': b'.\xd2\xa3\x91*\x0b$P C\xea\xe8N\xe4\xb2y\xc1\x8b\x90\xdd'
- }
- ]
-
-Request for changeset where recorded linknode isn't in DAG ancestry will get
-rewritten accordingly
-
- $ sendhttpv2peer << EOF
- > command filesdata
- > revisions eval:[{
- > b'type': b'changesetexplicit',
- > b'nodes': [
- > b'\x47\xfc\x30\x58\x09\x11\x23\x2c\xb2\x64\x67\x5b\x40\x28\x19\xde\xdd\xf6\xc6\xf0',
- > ]}]
- > fields eval:[b'linknode']
- > pathfilter eval:{b'include': [b'path:dupe-file']}
- > EOF
- creating http peer for wire protocol version 2
- sending filesdata command
- response: gen[
- {
- b'totalitems': 1,
- b'totalpaths': 1
- },
- {
- b'path': b'dupe-file',
- b'totalitems': 1
- },
- {
- b'linknode': b'G\xfc0X\t\x11#,\xb2dg[@(\x19\xde\xdd\xf6\xc6\xf0',
- b'node': b'.\xd2\xa3\x91*\x0b$P C\xea\xe8N\xe4\xb2y\xc1\x8b\x90\xdd'
- }
- ]
-
- $ sendhttpv2peer << EOF
- > command filesdata
- > revisions eval:[{
- > b'type': b'changesetexplicit',
- > b'nodes': [
- > b'\x47\xfc\x30\x58\x09\x11\x23\x2c\xb2\x64\x67\x5b\x40\x28\x19\xde\xdd\xf6\xc6\xf0',
- > ]}]
- > fields eval:[b'linknode']
- > haveparents eval:True
- > pathfilter eval:{b'include': [b'path:dupe-file']}
- > EOF
- creating http peer for wire protocol version 2
- sending filesdata command
- response: gen[
- {
- b'totalitems': 1,
- b'totalpaths': 1
- },
- {
- b'path': b'dupe-file',
- b'totalitems': 1
- },
- {
- b'linknode': b'G\xfc0X\t\x11#,\xb2dg[@(\x19\xde\xdd\xf6\xc6\xf0',
- b'node': b'.\xd2\xa3\x91*\x0b$P C\xea\xe8N\xe4\xb2y\xc1\x8b\x90\xdd'
- }
- ]
-
- $ cat error.log
--- a/tests/test-wireproto-command-heads.t Fri Feb 18 12:55:39 2022 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,56 +0,0 @@
- $ . $TESTDIR/wireprotohelpers.sh
-
- $ hg init server
- $ enablehttpv2 server
- $ cd server
- $ hg debugdrawdag << EOF
- > H I J
- > | | |
- > E F G
- > | |/
- > C D
- > |/
- > B
- > |
- > A
- > EOF
-
- $ hg phase --force --secret J
- $ hg phase --public E
-
- $ hg log -r 'E + H + I + G + J' -T '{rev}:{node} {desc} {phase}\n'
- 4:78d2dca436b2f5b188ac267e29b81e07266d38fc E public
- 7:ae492e36b0c8339ffaf328d00b85b4525de1165e H draft
- 8:1d6f6b91d44aaba6d5e580bc30a9948530dbe00b I draft
- 6:29446d2dc5419c5f97447a8bc062e4cc328bf241 G draft
- 9:dec04b246d7cbb670c6689806c05ad17c835284e J secret
-
- $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log
- $ cat hg.pid > $DAEMON_PIDS
-
-All non-secret heads returned by default
-
- $ sendhttpv2peer << EOF
- > command heads
- > EOF
- creating http peer for wire protocol version 2
- sending heads command
- response: [
- b'\x1dok\x91\xd4J\xab\xa6\xd5\xe5\x80\xbc0\xa9\x94\x850\xdb\xe0\x0b',
- b'\xaeI.6\xb0\xc83\x9f\xfa\xf3(\xd0\x0b\x85\xb4R]\xe1\x16^',
- b')Dm-\xc5A\x9c_\x97Dz\x8b\xc0b\xe4\xcc2\x8b\xf2A'
- ]
-
-Requesting just the public heads works
-
- $ sendhttpv2peer << EOF
- > command heads
- > publiconly 1
- > EOF
- creating http peer for wire protocol version 2
- sending heads command
- response: [
- b'x\xd2\xdc\xa46\xb2\xf5\xb1\x88\xac&~)\xb8\x1e\x07&m8\xfc'
- ]
-
- $ cat error.log
--- a/tests/test-wireproto-command-known.t Fri Feb 18 12:55:39 2022 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,58 +0,0 @@
- $ . $TESTDIR/wireprotohelpers.sh
-
- $ hg init server
- $ enablehttpv2 server
- $ cd server
- $ hg debugdrawdag << EOF
- > C D
- > |/
- > B
- > |
- > A
- > EOF
-
- $ hg log -T '{rev}:{node} {desc}\n'
- 3:be0ef73c17ade3fc89dc41701eb9fc3a91b58282 D
- 2:26805aba1e600a82e93661149f2313866a221a7b C
- 1:112478962961147124edd43549aedd1a335e44bf B
- 0:426bada5c67598ca65036d57d9e4b64b0c1ce7a0 A
-
- $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log
- $ cat hg.pid > $DAEMON_PIDS
-
-No arguments returns something reasonable
-
- $ sendhttpv2peer << EOF
- > command known
- > EOF
- creating http peer for wire protocol version 2
- sending known command
- response: []
-
-Single known node works
-
- $ sendhttpv2peer << EOF
- > command known
- > nodes eval:[b'\x42\x6b\xad\xa5\xc6\x75\x98\xca\x65\x03\x6d\x57\xd9\xe4\xb6\x4b\x0c\x1c\xe7\xa0']
- > EOF
- creating http peer for wire protocol version 2
- sending known command
- response: [
- True
- ]
-
-Multiple nodes works
-
- $ sendhttpv2peer << EOF
- > command known
- > nodes eval:[b'\x42\x6b\xad\xa5\xc6\x75\x98\xca\x65\x03\x6d\x57\xd9\xe4\xb6\x4b\x0c\x1c\xe7\xa0', b'00000000000000000000', b'\x11\x24\x78\x96\x29\x61\x14\x71\x24\xed\xd4\x35\x49\xae\xdd\x1a\x33\x5e\x44\xbf']
- > EOF
- creating http peer for wire protocol version 2
- sending known command
- response: [
- True,
- False,
- True
- ]
-
- $ cat error.log
--- a/tests/test-wireproto-command-listkeys.t Fri Feb 18 12:55:39 2022 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,65 +0,0 @@
- $ . $TESTDIR/wireprotohelpers.sh
-
- $ hg init server
- $ enablehttpv2 server
- $ cd server
- $ hg debugdrawdag << EOF
- > C D
- > |/
- > B
- > |
- > A
- > EOF
-
- $ hg phase --public -r C
- $ hg book -r C @
-
- $ hg log -T '{rev}:{node} {desc}\n'
- 3:be0ef73c17ade3fc89dc41701eb9fc3a91b58282 D
- 2:26805aba1e600a82e93661149f2313866a221a7b C
- 1:112478962961147124edd43549aedd1a335e44bf B
- 0:426bada5c67598ca65036d57d9e4b64b0c1ce7a0 A
-
- $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log
- $ cat hg.pid > $DAEMON_PIDS
-
-Request for namespaces works
-
- $ sendhttpv2peer << EOF
- > command listkeys
- > namespace namespaces
- > EOF
- creating http peer for wire protocol version 2
- sending listkeys command
- response: {
- b'bookmarks': b'',
- b'namespaces': b'',
- b'phases': b''
- }
-
-Request for phases works
-
- $ sendhttpv2peer << EOF
- > command listkeys
- > namespace phases
- > EOF
- creating http peer for wire protocol version 2
- sending listkeys command
- response: {
- b'be0ef73c17ade3fc89dc41701eb9fc3a91b58282': b'1',
- b'publishing': b'True'
- }
-
-Request for bookmarks works
-
- $ sendhttpv2peer << EOF
- > command listkeys
- > namespace bookmarks
- > EOF
- creating http peer for wire protocol version 2
- sending listkeys command
- response: {
- b'@': b'26805aba1e600a82e93661149f2313866a221a7b'
- }
-
- $ cat error.log
--- a/tests/test-wireproto-command-lookup.t Fri Feb 18 12:55:39 2022 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,33 +0,0 @@
- $ . $TESTDIR/wireprotohelpers.sh
-
- $ hg init server
- $ enablehttpv2 server
- $ cd server
- $ cat >> .hg/hgrc << EOF
- > [web]
- > push_ssl = false
- > allow-push = *
- > EOF
- $ hg debugdrawdag << EOF
- > C D
- > |/
- > B
- > |
- > A
- > EOF
- $ root_node=$(hg log -r A -T '{node}')
-
- $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log
- $ cat hg.pid > $DAEMON_PIDS
-
-lookup for known node works
-
- $ sendhttpv2peer << EOF
- > command lookup
- > key $root_node
- > EOF
- creating http peer for wire protocol version 2
- sending lookup command
- response: b'Bk\xad\xa5\xc6u\x98\xcae\x03mW\xd9\xe4\xb6K\x0c\x1c\xe7\xa0'
-
- $ cat error.log
--- a/tests/test-wireproto-command-manifestdata.t Fri Feb 18 12:55:39 2022 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,358 +0,0 @@
- $ . $TESTDIR/wireprotohelpers.sh
-
- $ hg init server
- $ enablehttpv2 server
- $ cd server
- $ echo a0 > a
- $ echo b0 > b
- $ mkdir -p dir0/child0 dir0/child1 dir1
- $ echo c0 > dir0/c
- $ echo d0 > dir0/d
- $ echo e0 > dir0/child0/e
- $ echo f0 > dir0/child1/f
- $ hg -q commit -A -m 'commit 0'
-
- $ echo a1 > a
- $ echo d1 > dir0/d
- $ hg commit -m 'commit 1'
- $ echo f0 > dir0/child1/f
- $ hg commit -m 'commit 2'
- nothing changed
- [1]
-
- $ hg -q up -r 0
- $ echo a2 > a
- $ hg commit -m 'commit 3'
- created new head
-
- $ hg log -G -T '{rev}:{node} {desc}\n'
- @ 2:c8757a2ffe552850d1e0dfe60d295ebf64c196d9 commit 3
- |
- | o 1:650165e803375748a94df471e5b58d85763e0b29 commit 1
- |/
- o 0:6d85ca1270b377d320098556ba5bfad34a9ee12d commit 0
-
-
- $ hg --debug debugindex -m
- rev linkrev nodeid p1 p2
- 0 0 1b175b595f022cfab5b809cc0ed551bd0b3ff5e4 0000000000000000000000000000000000000000 0000000000000000000000000000000000000000
- 1 1 91e0bdbfb0dde0023fa063edc1445f207a22eac7 1b175b595f022cfab5b809cc0ed551bd0b3ff5e4 0000000000000000000000000000000000000000
- 2 2 46a6721b5edaf0ea04b79a5cb3218854a4d2aba0 1b175b595f022cfab5b809cc0ed551bd0b3ff5e4 0000000000000000000000000000000000000000
-
- $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log
- $ cat hg.pid > $DAEMON_PIDS
-
-Missing arguments is an error
-
- $ sendhttpv2peer << EOF
- > command manifestdata
- > EOF
- creating http peer for wire protocol version 2
- sending manifestdata command
- abort: missing required arguments: nodes, tree
- [255]
-
- $ sendhttpv2peer << EOF
- > command manifestdata
- > nodes eval:[]
- > EOF
- creating http peer for wire protocol version 2
- sending manifestdata command
- abort: missing required arguments: tree
- [255]
-
-Unknown node is an error
-
- $ sendhttpv2peer << EOF
- > command manifestdata
- > nodes eval:[b'\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa']
- > tree eval:b''
- > EOF
- creating http peer for wire protocol version 2
- sending manifestdata command
- abort: unknown node: \xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa (esc)
- [255]
-
-Fetching a single revision returns just metadata by default
-
- $ sendhttpv2peer << EOF
- > command manifestdata
- > nodes eval:[b'\x46\xa6\x72\x1b\x5e\xda\xf0\xea\x04\xb7\x9a\x5c\xb3\x21\x88\x54\xa4\xd2\xab\xa0']
- > tree eval:b''
- > EOF
- creating http peer for wire protocol version 2
- sending manifestdata command
- response: gen[
- {
- b'totalitems': 1
- },
- {
- b'node': b'F\xa6r\x1b^\xda\xf0\xea\x04\xb7\x9a\\\xb3!\x88T\xa4\xd2\xab\xa0'
- }
- ]
-
-Requesting parents works
-
- $ sendhttpv2peer << EOF
- > command manifestdata
- > nodes eval:[b'\x46\xa6\x72\x1b\x5e\xda\xf0\xea\x04\xb7\x9a\x5c\xb3\x21\x88\x54\xa4\xd2\xab\xa0']
- > tree eval:b''
- > fields eval:[b'parents']
- > EOF
- creating http peer for wire protocol version 2
- sending manifestdata command
- response: gen[
- {
- b'totalitems': 1
- },
- {
- b'node': b'F\xa6r\x1b^\xda\xf0\xea\x04\xb7\x9a\\\xb3!\x88T\xa4\xd2\xab\xa0',
- b'parents': [
- b'\x1b\x17[Y_\x02,\xfa\xb5\xb8\t\xcc\x0e\xd5Q\xbd\x0b?\xf5\xe4',
- b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
- ]
- }
- ]
-
-Requesting revision data works
-(haveparents defaults to false, so fulltext is emitted)
-
- $ sendhttpv2peer << EOF
- > command manifestdata
- > nodes eval:[b'\x46\xa6\x72\x1b\x5e\xda\xf0\xea\x04\xb7\x9a\x5c\xb3\x21\x88\x54\xa4\xd2\xab\xa0']
- > tree eval:b''
- > fields eval:[b'revision']
- > EOF
- creating http peer for wire protocol version 2
- sending manifestdata command
- response: gen[
- {
- b'totalitems': 1
- },
- {
- b'fieldsfollowing': [
- [
- b'revision',
- 292
- ]
- ],
- b'node': b'F\xa6r\x1b^\xda\xf0\xea\x04\xb7\x9a\\\xb3!\x88T\xa4\xd2\xab\xa0'
- },
- b'a\x000879345e39377229634b420c639454156726c6b6\nb\x00819e258d31a5e1606629f365bb902a1b21ee4216\ndir0/c\x00914445346a0ca0629bd47ceb5dfe07e4d4cf2501\ndir0/child0/e\x00bbba6c06b30f443d34ff841bc985c4d0827c6be4\ndir0/child1/f\x0012fc7dcd773b5a0a929ce195228083c6ddc9cec4\ndir0/d\x00538206dc971e521540d6843abfe6d16032f6d426\n'
- ]
-
-haveparents=False yields same output
-
- $ sendhttpv2peer << EOF
- > command manifestdata
- > nodes eval:[b'\x46\xa6\x72\x1b\x5e\xda\xf0\xea\x04\xb7\x9a\x5c\xb3\x21\x88\x54\xa4\xd2\xab\xa0']
- > tree eval:b''
- > fields eval:[b'revision']
- > haveparents eval:False
- > EOF
- creating http peer for wire protocol version 2
- sending manifestdata command
- response: gen[
- {
- b'totalitems': 1
- },
- {
- b'fieldsfollowing': [
- [
- b'revision',
- 292
- ]
- ],
- b'node': b'F\xa6r\x1b^\xda\xf0\xea\x04\xb7\x9a\\\xb3!\x88T\xa4\xd2\xab\xa0'
- },
- b'a\x000879345e39377229634b420c639454156726c6b6\nb\x00819e258d31a5e1606629f365bb902a1b21ee4216\ndir0/c\x00914445346a0ca0629bd47ceb5dfe07e4d4cf2501\ndir0/child0/e\x00bbba6c06b30f443d34ff841bc985c4d0827c6be4\ndir0/child1/f\x0012fc7dcd773b5a0a929ce195228083c6ddc9cec4\ndir0/d\x00538206dc971e521540d6843abfe6d16032f6d426\n'
- ]
-
-haveparents=True will emit delta
-
- $ sendhttpv2peer << EOF
- > command manifestdata
- > nodes eval:[b'\x46\xa6\x72\x1b\x5e\xda\xf0\xea\x04\xb7\x9a\x5c\xb3\x21\x88\x54\xa4\xd2\xab\xa0']
- > tree eval:b''
- > fields eval:[b'revision']
- > haveparents eval:True
- > EOF
- creating http peer for wire protocol version 2
- sending manifestdata command
- response: gen[
- {
- b'totalitems': 1
- },
- {
- b'deltabasenode': b'\x1b\x17[Y_\x02,\xfa\xb5\xb8\t\xcc\x0e\xd5Q\xbd\x0b?\xf5\xe4',
- b'fieldsfollowing': [
- [
- b'delta',
- 55
- ]
- ],
- b'node': b'F\xa6r\x1b^\xda\xf0\xea\x04\xb7\x9a\\\xb3!\x88T\xa4\xd2\xab\xa0'
- },
- b'\x00\x00\x00\x00\x00\x00\x00+\x00\x00\x00+a\x000879345e39377229634b420c639454156726c6b6\n'
- ]
-
-Requesting multiple revisions works
-(haveparents defaults to false, so fulltext is emitted unless a parent
-has been emitted)
-
- $ sendhttpv2peer << EOF
- > command manifestdata
- > nodes eval:[b'\x1b\x17\x5b\x59\x5f\x02\x2c\xfa\xb5\xb8\x09\xcc\x0e\xd5\x51\xbd\x0b\x3f\xf5\xe4', b'\x46\xa6\x72\x1b\x5e\xda\xf0\xea\x04\xb7\x9a\x5c\xb3\x21\x88\x54\xa4\xd2\xab\xa0']
- > tree eval:b''
- > fields eval:[b'revision']
- > EOF
- creating http peer for wire protocol version 2
- sending manifestdata command
- response: gen[
- {
- b'totalitems': 2
- },
- {
- b'fieldsfollowing': [
- [
- b'revision',
- 292
- ]
- ],
- b'node': b'\x1b\x17[Y_\x02,\xfa\xb5\xb8\t\xcc\x0e\xd5Q\xbd\x0b?\xf5\xe4'
- },
- b'a\x002b4eb07319bfa077a40a2f04913659aef0da42da\nb\x00819e258d31a5e1606629f365bb902a1b21ee4216\ndir0/c\x00914445346a0ca0629bd47ceb5dfe07e4d4cf2501\ndir0/child0/e\x00bbba6c06b30f443d34ff841bc985c4d0827c6be4\ndir0/child1/f\x0012fc7dcd773b5a0a929ce195228083c6ddc9cec4\ndir0/d\x00538206dc971e521540d6843abfe6d16032f6d426\n',
- {
- b'deltabasenode': b'\x1b\x17[Y_\x02,\xfa\xb5\xb8\t\xcc\x0e\xd5Q\xbd\x0b?\xf5\xe4',
- b'fieldsfollowing': [
- [
- b'delta',
- 55
- ]
- ],
- b'node': b'F\xa6r\x1b^\xda\xf0\xea\x04\xb7\x9a\\\xb3!\x88T\xa4\xd2\xab\xa0'
- },
- b'\x00\x00\x00\x00\x00\x00\x00+\x00\x00\x00+a\x000879345e39377229634b420c639454156726c6b6\n'
- ]
-
-With haveparents=True, first revision is a delta instead of fulltext
-
- $ sendhttpv2peer << EOF
- > command manifestdata
- > nodes eval:[b'\x1b\x17\x5b\x59\x5f\x02\x2c\xfa\xb5\xb8\x09\xcc\x0e\xd5\x51\xbd\x0b\x3f\xf5\xe4', b'\x46\xa6\x72\x1b\x5e\xda\xf0\xea\x04\xb7\x9a\x5c\xb3\x21\x88\x54\xa4\xd2\xab\xa0']
- > tree eval:b''
- > fields eval:[b'revision']
- > haveparents eval:True
- > EOF
- creating http peer for wire protocol version 2
- sending manifestdata command
- response: gen[
- {
- b'totalitems': 2
- },
- {
- b'fieldsfollowing': [
- [
- b'revision',
- 292
- ]
- ],
- b'node': b'\x1b\x17[Y_\x02,\xfa\xb5\xb8\t\xcc\x0e\xd5Q\xbd\x0b?\xf5\xe4'
- },
- b'a\x002b4eb07319bfa077a40a2f04913659aef0da42da\nb\x00819e258d31a5e1606629f365bb902a1b21ee4216\ndir0/c\x00914445346a0ca0629bd47ceb5dfe07e4d4cf2501\ndir0/child0/e\x00bbba6c06b30f443d34ff841bc985c4d0827c6be4\ndir0/child1/f\x0012fc7dcd773b5a0a929ce195228083c6ddc9cec4\ndir0/d\x00538206dc971e521540d6843abfe6d16032f6d426\n',
- {
- b'deltabasenode': b'\x1b\x17[Y_\x02,\xfa\xb5\xb8\t\xcc\x0e\xd5Q\xbd\x0b?\xf5\xe4',
- b'fieldsfollowing': [
- [
- b'delta',
- 55
- ]
- ],
- b'node': b'F\xa6r\x1b^\xda\xf0\xea\x04\xb7\x9a\\\xb3!\x88T\xa4\xd2\xab\xa0'
- },
- b'\x00\x00\x00\x00\x00\x00\x00+\x00\x00\x00+a\x000879345e39377229634b420c639454156726c6b6\n'
- ]
-
-Revisions are sorted by DAG order, parents first
-
- $ sendhttpv2peer << EOF
- > command manifestdata
- > nodes eval:[b'\x46\xa6\x72\x1b\x5e\xda\xf0\xea\x04\xb7\x9a\x5c\xb3\x21\x88\x54\xa4\xd2\xab\xa0', b'\x1b\x17\x5b\x59\x5f\x02\x2c\xfa\xb5\xb8\x09\xcc\x0e\xd5\x51\xbd\x0b\x3f\xf5\xe4']
- > tree eval:b''
- > fields eval:[b'revision']
- > EOF
- creating http peer for wire protocol version 2
- sending manifestdata command
- response: gen[
- {
- b'totalitems': 2
- },
- {
- b'fieldsfollowing': [
- [
- b'revision',
- 292
- ]
- ],
- b'node': b'\x1b\x17[Y_\x02,\xfa\xb5\xb8\t\xcc\x0e\xd5Q\xbd\x0b?\xf5\xe4'
- },
- b'a\x002b4eb07319bfa077a40a2f04913659aef0da42da\nb\x00819e258d31a5e1606629f365bb902a1b21ee4216\ndir0/c\x00914445346a0ca0629bd47ceb5dfe07e4d4cf2501\ndir0/child0/e\x00bbba6c06b30f443d34ff841bc985c4d0827c6be4\ndir0/child1/f\x0012fc7dcd773b5a0a929ce195228083c6ddc9cec4\ndir0/d\x00538206dc971e521540d6843abfe6d16032f6d426\n',
- {
- b'deltabasenode': b'\x1b\x17[Y_\x02,\xfa\xb5\xb8\t\xcc\x0e\xd5Q\xbd\x0b?\xf5\xe4',
- b'fieldsfollowing': [
- [
- b'delta',
- 55
- ]
- ],
- b'node': b'F\xa6r\x1b^\xda\xf0\xea\x04\xb7\x9a\\\xb3!\x88T\xa4\xd2\xab\xa0'
- },
- b'\x00\x00\x00\x00\x00\x00\x00+\x00\x00\x00+a\x000879345e39377229634b420c639454156726c6b6\n'
- ]
-
-Requesting parents and revision data works
-
- $ sendhttpv2peer << EOF
- > command manifestdata
- > nodes eval:[b'\x1b\x17\x5b\x59\x5f\x02\x2c\xfa\xb5\xb8\x09\xcc\x0e\xd5\x51\xbd\x0b\x3f\xf5\xe4', b'\x46\xa6\x72\x1b\x5e\xda\xf0\xea\x04\xb7\x9a\x5c\xb3\x21\x88\x54\xa4\xd2\xab\xa0']
- > tree eval:b''
- > fields eval:[b'parents', b'revision']
- > EOF
- creating http peer for wire protocol version 2
- sending manifestdata command
- response: gen[
- {
- b'totalitems': 2
- },
- {
- b'fieldsfollowing': [
- [
- b'revision',
- 292
- ]
- ],
- b'node': b'\x1b\x17[Y_\x02,\xfa\xb5\xb8\t\xcc\x0e\xd5Q\xbd\x0b?\xf5\xe4',
- b'parents': [
- b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
- b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
- ]
- },
- b'a\x002b4eb07319bfa077a40a2f04913659aef0da42da\nb\x00819e258d31a5e1606629f365bb902a1b21ee4216\ndir0/c\x00914445346a0ca0629bd47ceb5dfe07e4d4cf2501\ndir0/child0/e\x00bbba6c06b30f443d34ff841bc985c4d0827c6be4\ndir0/child1/f\x0012fc7dcd773b5a0a929ce195228083c6ddc9cec4\ndir0/d\x00538206dc971e521540d6843abfe6d16032f6d426\n',
- {
- b'deltabasenode': b'\x1b\x17[Y_\x02,\xfa\xb5\xb8\t\xcc\x0e\xd5Q\xbd\x0b?\xf5\xe4',
- b'fieldsfollowing': [
- [
- b'delta',
- 55
- ]
- ],
- b'node': b'F\xa6r\x1b^\xda\xf0\xea\x04\xb7\x9a\\\xb3!\x88T\xa4\xd2\xab\xa0',
- b'parents': [
- b'\x1b\x17[Y_\x02,\xfa\xb5\xb8\t\xcc\x0e\xd5Q\xbd\x0b?\xf5\xe4',
- b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
- ]
- },
- b'\x00\x00\x00\x00\x00\x00\x00+\x00\x00\x00+a\x000879345e39377229634b420c639454156726c6b6\n'
- ]
-
- $ cat error.log
--- a/tests/test-wireproto-command-pushkey.t Fri Feb 18 12:55:39 2022 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,45 +0,0 @@
- $ . $TESTDIR/wireprotohelpers.sh
-
- $ hg init server
- $ enablehttpv2 server
- $ cd server
- $ cat >> .hg/hgrc << EOF
- > [web]
- > push_ssl = false
- > allow-push = *
- > EOF
- $ hg debugdrawdag << EOF
- > C D
- > |/
- > B
- > |
- > A
- > EOF
-
- $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log
- $ cat hg.pid > $DAEMON_PIDS
-
-pushkey for a bookmark works
-
- $ sendhttpv2peer << EOF
- > command pushkey
- > namespace bookmarks
- > key @
- > old
- > new 426bada5c67598ca65036d57d9e4b64b0c1ce7a0
- > EOF
- creating http peer for wire protocol version 2
- sending pushkey command
- response: True
-
- $ sendhttpv2peer << EOF
- > command listkeys
- > namespace bookmarks
- > EOF
- creating http peer for wire protocol version 2
- sending listkeys command
- response: {
- b'@': b'426bada5c67598ca65036d57d9e4b64b0c1ce7a0'
- }
-
- $ cat error.log
--- a/tests/test-wireproto-command-rawstorefiledata.t Fri Feb 18 12:55:39 2022 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,132 +0,0 @@
- $ . $TESTDIR/wireprotohelpers.sh
- $ hg init server
- $ enablehttpv2 server
- $ cd server
- $ echo a0 > a
- $ echo b0 > b
- $ hg -q commit -A -m 'commit 0'
- $ echo a1 > a
- $ hg commit -m 'commit 1'
- $ mkdir dir0
- $ mkdir dir1
- $ echo c0 > dir0/c
- $ echo d0 > dir0/d
- $ echo e0 > dir1/e
- $ echo f0 > dir1/f
- $ hg commit -A -m 'commit 2'
- adding dir0/c
- adding dir0/d
- adding dir1/e
- adding dir1/f
- $ echo f1 > dir1/f
- $ hg commit -m 'commit 3'
-
- $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log
- $ cat hg.pid > $DAEMON_PIDS
-
-Missing requirement argument results in error
-
- $ sendhttpv2peer << EOF
- > command rawstorefiledata
- > EOF
- creating http peer for wire protocol version 2
- sending rawstorefiledata command
- abort: missing required arguments: files
- [255]
-
-Unknown files value results in error
-
- $ sendhttpv2peer << EOF
- > command rawstorefiledata
- > files eval:[b'unknown']
- > EOF
- creating http peer for wire protocol version 2
- sending rawstorefiledata command
- abort: unknown file type: unknown
- [255]
-
-Requesting just changelog works
-
- $ sendhttpv2peer << EOF
- > command rawstorefiledata
- > files eval:[b'changelog']
- > EOF
- creating http peer for wire protocol version 2
- sending rawstorefiledata command
- response: gen[
- {
- b'filecount': 1,
- b'totalsize': 527 (no-zstd !)
- b'totalsize': 530 (zstd !)
- },
- {
- b'location': b'store',
- b'path': b'00changelog.i',
- b'size': 527 (no-zstd !)
- b'size': 530 (zstd !)
- },
- b'\x00\x01\x00\x01\x00\x00\x00\x00\x00\x00\x00@\x00\x00\x00?\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00u992f4779029a3df8d0666d00bb924f69634e2641\ntest\n0 0\na\nb\n\ncommit 0\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00>\x00\x00\x00=\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\xff\xff\xff\xffD2\xd86&\xe8\xa9\x86U\xf0b\xec\x1f*C\xb0\x7f\x7f\xbb\xb0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00ua988fb43583e871d1ed5750ee074c6d840bbbfc8\ntest\n0 0\na\n\ncommit 1\x00\x00\x00\x00\x00~\x00\x00\x00\x00\x00N\x00\x00\x00W\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x01\xff\xff\xff\xff\xa4r\xd2\xea\x96U\x1a\x1e\xbb\x011-\xb2\xe6\xa7\x86\xd0F\x96o\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00x\x9c%\xc5\xc1\t\xc0 \x0c\x05\xd0{\xa6p\x03cjI\xd71\xf9\x11<H\xa1u\x7fJ\xf1]\x9eyu\x98\xa2\xb0Z\x88jk0\x11\x95z\xa0\xdb\x11\\\x81S\xfc*\xb4\xe2]\xc4\x89\t\xe3\xe1\xec;\xfc\x95\x1c\xbbN\xe4\xf7\x9cc%\xf9\x00S#\x19\x13\x00\x00\x00\x00\x00\xcc\x00\x00\x00\x00\x00C\x00\x00\x00B\x00\x00\x00\x03\x00\x00\x00\x03\x00\x00\x00\x02\xff\xff\xff\xff\x85kg{\x94a\x12i\xc5lW5[\x85\xf9\x95|\xfc\xc1\xb9\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00u90231ddca36fa178a0eed99bd03078112487dda3\ntest\n0 0\ndir1/f\n\ncommit 3', (no-zstd !)
- b'\x00\x01\x00\x01\x00\x00\x00\x00\x00\x00\x00@\x00\x00\x00?\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00u992f4779029a3df8d0666d00bb924f69634e2641\ntest\n0 0\na\nb\n\ncommit 0\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00>\x00\x00\x00=\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\xff\xff\xff\xffD2\xd86&\xe8\xa9\x86U\xf0b\xec\x1f*C\xb0\x7f\x7f\xbb\xb0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00ua988fb43583e871d1ed5750ee074c6d840bbbfc8\ntest\n0 0\na\n\ncommit 1\x00\x00\x00\x00\x00~\x00\x00\x00\x00\x00Q\x00\x00\x00W\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x01\xff\xff\xff\xff\xa4r\xd2\xea\x96U\x1a\x1e\xbb\x011-\xb2\xe6\xa7\x86\xd0F\x96o\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00(\xb5/\xfd WE\x02\x00r\x04\x0f\x14\x90\x01\x0e#\xf7h$;NQC%\xf8f\xd7\xb1\x81\x8d+\x01\x16+)5\xa8\x19\xdaA\xae\xe3\x00\xe9v\xe2l\x05v\x19\x11\xd4\xc1onK\xa2\x17c\xb4\xf3\xe7 z\x13\x8f\x1c\xf3j4\x03\x03\x00`\x06\x84\x8b\x1a\n\x14\x00\x00\x00\x00\x00\xcf\x00\x00\x00\x00\x00C\x00\x00\x00B\x00\x00\x00\x03\x00\x00\x00\x03\x00\x00\x00\x02\xff\xff\xff\xff\x85kg{\x94a\x12i\xc5lW5[\x85\xf9\x95|\xfc\xc1\xb9\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00u90231ddca36fa178a0eed99bd03078112487dda3\ntest\n0 0\ndir1/f\n\ncommit 3', (zstd !)
- b''
- ]
-
-Requesting just manifestlog works (as impractical as that operation may be).
-
- $ sendhttpv2peer << EOF
- > command rawstorefiledata
- > files eval:[b'manifestlog']
- > EOF
- creating http peer for wire protocol version 2
- sending rawstorefiledata command
- response: gen[
- {
- b'filecount': 1,
- b'totalsize': 584 (no-zstd !)
- b'totalsize': 588 (zstd !)
- },
- {
- b'location': b'store',
- b'path': b'00manifest.i',
- b'size': 584 (no-zstd !)
- b'size': 588 (zstd !)
- },
- b'\x00\x03\x00\x01\x00\x00\x00\x00\x00\x00\x00I\x00\x00\x00V\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00x\x9c\r\xca\xc1\x11\x00!\x08\x040\xdfV\x03+\xa2\x94\xb3\x8c\xd0\x7f\twy\x87\x03i\x95r\x96F6\xe5\x1c\x9a\x10-\x16\xba|\x07\xab\xe5\xd1\xf08s\\\x8d\xc2\xbeo)w\xa9\x8b;\xa2\xff\x95\x19\x02jB\xab\x0c\xea\xf3\x03\xcf\x1d\x16\t\x00\x00\x00\x00\x00I\x00\x00\x00\x00\x007\x00\x00\x00V\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\xff\xff\xff\xff\xa9\x88\xfbCX>\x87\x1d\x1e\xd5u\x0e\xe0t\xc6\xd8@\xbb\xbf\xc8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00+\x00\x00\x00+a\x009a38122997b3ac97be2a9aa2e556838341fdf2cc\n\x00\x00\x00\x00\x00\x80\x00\x00\x00\x00\x00\x8c\x00\x00\x01\x16\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00\x01\xff\xff\xff\xff\xbcL\xdb}\x10{\xe2w\xaa\xdb"rC\xdf\xb3\xe0M\xd5,\x81\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00x\x9c%\xcd\xb9\rB1\x10\x00Q\xc7\xbf\x19\xf6\xb6\xdd\x08\xb9\xf7\x92H\xa9\x90\xd2\xb8\x82\xc9\x9e4c\x8c\xfb\xf8\xf7\xca\xc7\x13n16\x8a\x88\xb2\xd8\x818`\xb4=eF\xb9f\x17\xcc\x92\x94hR\xc0\xeb\xe7s(/\x02\xcb\xd8\x13K\tU m\t\x1f\xef\xb2D\x03\xa6\xb6\x14\xb2\xaf\xc7[\rw?\x16`\xce\xd0"\x9c,\xddK\xd0c/\rIX4\xc3\xbc\xe4\xef{ u\xcc\x8c\x9c\x93]\x0f\x9cM;\n\xb7\x12-X\x1c\x96\x9fuT\xc8\xf5\x06\x88\xa25W\x00\x00\x00\x00\x01\x0c\x00\x00\x00\x00\x00<\x00\x00\x01\x16\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00\x02\xff\xff\xff\xff\x90#\x1d\xdc\xa3o\xa1x\xa0\xee\xd9\x9b\xd00x\x11$\x87\xdd\xa3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe6\x00\x00\x01\x16\x00\x00\x000dir1/f\x0028c776ae08d0d55eb40648b401b90ff54448348e\n', (no-zstd !)
- b'\x00\x03\x00\x01\x00\x00\x00\x00\x00\x00\x00H\x00\x00\x00V\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00(\xb5/\xfd V\xfd\x01\x00b\xc5\x0e\x0f\xc0\xd1\x00\xfb\x0c\xb9\xca\xdf\xb2R\xba!\xf2\xf6\x1d\x80\xd5\x95Yc\xef9DaT\xcefcM\xf1\x12\t\x84\xf3\x1a\x04\x04N\\\'S\xf2\'\x8cz5\xc5\x9f\xfa\x18\xf3\x82W\x1a\x83Y\xe8\xf0\x00\x00\x00\x00\x00\x00H\x00\x00\x00\x00\x007\x00\x00\x00V\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\xff\xff\xff\xff\xa9\x88\xfbCX>\x87\x1d\x1e\xd5u\x0e\xe0t\xc6\xd8@\xbb\xbf\xc8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00+\x00\x00\x00+a\x009a38122997b3ac97be2a9aa2e556838341fdf2cc\n\x00\x00\x00\x00\x00\x7f\x00\x00\x00\x00\x00\x91\x00\x00\x01\x16\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00\x01\xff\xff\xff\xff\xbcL\xdb}\x10{\xe2w\xaa\xdb"rC\xdf\xb3\xe0M\xd5,\x81\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00(\xb5/\xfd \xccE\x04\x00bK\x1e\x17\xb0A0\xff\xff\x9b\xb5V\x99\x99\xfa\xb6\xae\xf5n),"\xf1\n\x02\xb5\x07\x82++\xd1]T\x1b3\xaa\x8e\x10+)R\xa6\\\x9a\x10\xab+\xb4\x8bB\x9f\x13U\xd4\x98\xbd\xde \x9a\xf4\xd1}[\xfb{,q\x14Kf\x06\x1e\x10\xd6\x17\xbbl\x90\x16\xb9\xb3\xd8\x07\xee\xfc\xa8\x8eI\x10]\x9c\x1ava\x054W\xad\xdf\xb3\x18\xee\xbdd\x15\xdf$\x85St\n\xde\xee?\x91\xa0\x83\x11\x08\xd8\x01\x80\x10B\x04\x00\x04S\x04B\xc7Tw\x9f\xb9,\x00\x00\x00\x00\x01\x10\x00\x00\x00\x00\x00<\x00\x00\x01\x16\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00\x02\xff\xff\xff\xff\x90#\x1d\xdc\xa3o\xa1x\xa0\xee\xd9\x9b\xd00x\x11$\x87\xdd\xa3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe6\x00\x00\x01\x16\x00\x00\x000dir1/f\x0028c776ae08d0d55eb40648b401b90ff54448348e\n', (zstd !)
- b''
- ]
-
-Requesting both changelog and manifestlog works.
-
- $ sendhttpv2peer << EOF
- > command rawstorefiledata
- > files eval:[b'changelog', b'manifestlog']
- > EOF
- creating http peer for wire protocol version 2
- sending rawstorefiledata command
- response: gen[
- {
- b'filecount': 2,
- b'totalsize': 1111 (no-zstd !)
- b'totalsize': 1118 (zstd !)
- },
- {
- b'location': b'store',
- b'path': b'00manifest.i',
- b'size': 584 (no-zstd !)
- b'size': 588 (zstd !)
- },
- b'\x00\x03\x00\x01\x00\x00\x00\x00\x00\x00\x00I\x00\x00\x00V\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00x\x9c\r\xca\xc1\x11\x00!\x08\x040\xdfV\x03+\xa2\x94\xb3\x8c\xd0\x7f\twy\x87\x03i\x95r\x96F6\xe5\x1c\x9a\x10-\x16\xba|\x07\xab\xe5\xd1\xf08s\\\x8d\xc2\xbeo)w\xa9\x8b;\xa2\xff\x95\x19\x02jB\xab\x0c\xea\xf3\x03\xcf\x1d\x16\t\x00\x00\x00\x00\x00I\x00\x00\x00\x00\x007\x00\x00\x00V\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\xff\xff\xff\xff\xa9\x88\xfbCX>\x87\x1d\x1e\xd5u\x0e\xe0t\xc6\xd8@\xbb\xbf\xc8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00+\x00\x00\x00+a\x009a38122997b3ac97be2a9aa2e556838341fdf2cc\n\x00\x00\x00\x00\x00\x80\x00\x00\x00\x00\x00\x8c\x00\x00\x01\x16\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00\x01\xff\xff\xff\xff\xbcL\xdb}\x10{\xe2w\xaa\xdb"rC\xdf\xb3\xe0M\xd5,\x81\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00x\x9c%\xcd\xb9\rB1\x10\x00Q\xc7\xbf\x19\xf6\xb6\xdd\x08\xb9\xf7\x92H\xa9\x90\xd2\xb8\x82\xc9\x9e4c\x8c\xfb\xf8\xf7\xca\xc7\x13n16\x8a\x88\xb2\xd8\x818`\xb4=eF\xb9f\x17\xcc\x92\x94hR\xc0\xeb\xe7s(/\x02\xcb\xd8\x13K\tU m\t\x1f\xef\xb2D\x03\xa6\xb6\x14\xb2\xaf\xc7[\rw?\x16`\xce\xd0"\x9c,\xddK\xd0c/\rIX4\xc3\xbc\xe4\xef{ u\xcc\x8c\x9c\x93]\x0f\x9cM;\n\xb7\x12-X\x1c\x96\x9fuT\xc8\xf5\x06\x88\xa25W\x00\x00\x00\x00\x01\x0c\x00\x00\x00\x00\x00<\x00\x00\x01\x16\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00\x02\xff\xff\xff\xff\x90#\x1d\xdc\xa3o\xa1x\xa0\xee\xd9\x9b\xd00x\x11$\x87\xdd\xa3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe6\x00\x00\x01\x16\x00\x00\x000dir1/f\x0028c776ae08d0d55eb40648b401b90ff54448348e\n', (no-zstd !)
- b'\x00\x03\x00\x01\x00\x00\x00\x00\x00\x00\x00H\x00\x00\x00V\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00(\xb5/\xfd V\xfd\x01\x00b\xc5\x0e\x0f\xc0\xd1\x00\xfb\x0c\xb9\xca\xdf\xb2R\xba!\xf2\xf6\x1d\x80\xd5\x95Yc\xef9DaT\xcefcM\xf1\x12\t\x84\xf3\x1a\x04\x04N\\\'S\xf2\'\x8cz5\xc5\x9f\xfa\x18\xf3\x82W\x1a\x83Y\xe8\xf0\x00\x00\x00\x00\x00\x00H\x00\x00\x00\x00\x007\x00\x00\x00V\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\xff\xff\xff\xff\xa9\x88\xfbCX>\x87\x1d\x1e\xd5u\x0e\xe0t\xc6\xd8@\xbb\xbf\xc8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00+\x00\x00\x00+a\x009a38122997b3ac97be2a9aa2e556838341fdf2cc\n\x00\x00\x00\x00\x00\x7f\x00\x00\x00\x00\x00\x91\x00\x00\x01\x16\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00\x01\xff\xff\xff\xff\xbcL\xdb}\x10{\xe2w\xaa\xdb"rC\xdf\xb3\xe0M\xd5,\x81\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00(\xb5/\xfd \xccE\x04\x00bK\x1e\x17\xb0A0\xff\xff\x9b\xb5V\x99\x99\xfa\xb6\xae\xf5n),"\xf1\n\x02\xb5\x07\x82++\xd1]T\x1b3\xaa\x8e\x10+)R\xa6\\\x9a\x10\xab+\xb4\x8bB\x9f\x13U\xd4\x98\xbd\xde \x9a\xf4\xd1}[\xfb{,q\x14Kf\x06\x1e\x10\xd6\x17\xbbl\x90\x16\xb9\xb3\xd8\x07\xee\xfc\xa8\x8eI\x10]\x9c\x1ava\x054W\xad\xdf\xb3\x18\xee\xbdd\x15\xdf$\x85St\n\xde\xee?\x91\xa0\x83\x11\x08\xd8\x01\x80\x10B\x04\x00\x04S\x04B\xc7Tw\x9f\xb9,\x00\x00\x00\x00\x01\x10\x00\x00\x00\x00\x00<\x00\x00\x01\x16\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00\x02\xff\xff\xff\xff\x90#\x1d\xdc\xa3o\xa1x\xa0\xee\xd9\x9b\xd00x\x11$\x87\xdd\xa3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe6\x00\x00\x01\x16\x00\x00\x000dir1/f\x0028c776ae08d0d55eb40648b401b90ff54448348e\n', (zstd !)
- b'',
- {
- b'location': b'store',
- b'path': b'00changelog.i',
- b'size': 527 (no-zstd !)
- b'size': 530 (zstd !)
- },
- b'\x00\x01\x00\x01\x00\x00\x00\x00\x00\x00\x00@\x00\x00\x00?\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00u992f4779029a3df8d0666d00bb924f69634e2641\ntest\n0 0\na\nb\n\ncommit 0\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00>\x00\x00\x00=\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\xff\xff\xff\xffD2\xd86&\xe8\xa9\x86U\xf0b\xec\x1f*C\xb0\x7f\x7f\xbb\xb0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00ua988fb43583e871d1ed5750ee074c6d840bbbfc8\ntest\n0 0\na\n\ncommit 1\x00\x00\x00\x00\x00~\x00\x00\x00\x00\x00N\x00\x00\x00W\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x01\xff\xff\xff\xff\xa4r\xd2\xea\x96U\x1a\x1e\xbb\x011-\xb2\xe6\xa7\x86\xd0F\x96o\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00x\x9c%\xc5\xc1\t\xc0 \x0c\x05\xd0{\xa6p\x03cjI\xd71\xf9\x11<H\xa1u\x7fJ\xf1]\x9eyu\x98\xa2\xb0Z\x88jk0\x11\x95z\xa0\xdb\x11\\\x81S\xfc*\xb4\xe2]\xc4\x89\t\xe3\xe1\xec;\xfc\x95\x1c\xbbN\xe4\xf7\x9cc%\xf9\x00S#\x19\x13\x00\x00\x00\x00\x00\xcc\x00\x00\x00\x00\x00C\x00\x00\x00B\x00\x00\x00\x03\x00\x00\x00\x03\x00\x00\x00\x02\xff\xff\xff\xff\x85kg{\x94a\x12i\xc5lW5[\x85\xf9\x95|\xfc\xc1\xb9\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00u90231ddca36fa178a0eed99bd03078112487dda3\ntest\n0 0\ndir1/f\n\ncommit 3', (no-zstd !)
- b'\x00\x01\x00\x01\x00\x00\x00\x00\x00\x00\x00@\x00\x00\x00?\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00u992f4779029a3df8d0666d00bb924f69634e2641\ntest\n0 0\na\nb\n\ncommit 0\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00>\x00\x00\x00=\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\xff\xff\xff\xffD2\xd86&\xe8\xa9\x86U\xf0b\xec\x1f*C\xb0\x7f\x7f\xbb\xb0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00ua988fb43583e871d1ed5750ee074c6d840bbbfc8\ntest\n0 0\na\n\ncommit 1\x00\x00\x00\x00\x00~\x00\x00\x00\x00\x00Q\x00\x00\x00W\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x01\xff\xff\xff\xff\xa4r\xd2\xea\x96U\x1a\x1e\xbb\x011-\xb2\xe6\xa7\x86\xd0F\x96o\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00(\xb5/\xfd WE\x02\x00r\x04\x0f\x14\x90\x01\x0e#\xf7h$;NQC%\xf8f\xd7\xb1\x81\x8d+\x01\x16+)5\xa8\x19\xdaA\xae\xe3\x00\xe9v\xe2l\x05v\x19\x11\xd4\xc1onK\xa2\x17c\xb4\xf3\xe7 z\x13\x8f\x1c\xf3j4\x03\x03\x00`\x06\x84\x8b\x1a\n\x14\x00\x00\x00\x00\x00\xcf\x00\x00\x00\x00\x00C\x00\x00\x00B\x00\x00\x00\x03\x00\x00\x00\x03\x00\x00\x00\x02\xff\xff\xff\xff\x85kg{\x94a\x12i\xc5lW5[\x85\xf9\x95|\xfc\xc1\xb9\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00u90231ddca36fa178a0eed99bd03078112487dda3\ntest\n0 0\ndir1/f\n\ncommit 3', (zstd !)
- b''
- ]
-
- $ cat error.log
--- a/tests/test-wireproto-content-redirects.t Fri Feb 18 12:55:39 2022 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,1475 +0,0 @@
- $ . $TESTDIR/wireprotohelpers.sh
-
-persistent-nodemap is not enabled by default. It is not relevant for this test so disable it.
-
- $ cat >> $HGRCPATH << EOF
- > [format]
- > use-persistent-nodemap = no
- > [extensions]
- > blackbox =
- > [blackbox]
- > track = simplecache
- > EOF
-
- $ hg init server
- $ enablehttpv2 server
- $ cd server
- $ cat >> .hg/hgrc << EOF
- > [server]
- > compressionengines = zlib
- > [extensions]
- > simplecache = $TESTDIR/wireprotosimplecache.py
- > [simplecache]
- > cacheapi = true
- > EOF
-
- $ echo a0 > a
- $ echo b0 > b
- $ hg -q commit -A -m 'commit 0'
- $ echo a1 > a
- $ hg commit -m 'commit 1'
-
- $ hg --debug debugindex -m
- rev linkrev nodeid p1 p2
- 0 0 992f4779029a3df8d0666d00bb924f69634e2641 0000000000000000000000000000000000000000 0000000000000000000000000000000000000000
- 1 1 a988fb43583e871d1ed5750ee074c6d840bbbfc8 992f4779029a3df8d0666d00bb924f69634e2641 0000000000000000000000000000000000000000
-
- $ hg --config simplecache.redirectsfile=redirects.py serve -p $HGPORT -d --pid-file hg.pid -E error.log
- $ cat hg.pid > $DAEMON_PIDS
-
- $ cat > redirects.py << EOF
- > [
- > {
- > b'name': b'target-a',
- > b'protocol': b'http',
- > b'snirequired': False,
- > b'tlsversions': [b'1.2', b'1.3'],
- > b'uris': [b'http://example.com/'],
- > },
- > ]
- > EOF
-
-Redirect targets advertised when configured
-
- $ sendhttpv2peerhandshake << EOF
- > command capabilities
- > EOF
- creating http peer for wire protocol version 2
- s> setsockopt(6, 1, 1) -> None (?)
- s> GET /?cmd=capabilities HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> vary: X-HgProto-1,X-HgUpgrade-1\r\n
- s> x-hgproto-1: cbor\r\n
- s> x-hgupgrade-1: exp-http-v2-0003\r\n
- s> accept: application/mercurial-0.1\r\n
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> user-agent: Mercurial debugwireproto\r\n
- s> \r\n
- s> makefile('rb', None)
- s> HTTP/1.1 200 OK\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Content-Type: application/mercurial-cbor\r\n
- s> Content-Length: 2289\r\n
- s> \r\n
- s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\
x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa5DnameHtarget-aHprotocolDhttpKsnirequired\xf4Ktlsversions\x82C1.2C1.3Duris\x81Shttp://example.com/Nv1capabilitiesY\x01\xe4batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
- (remote redirect target target-a is compatible) (tls1.2 !)
- (remote redirect target target-a requires unsupported TLS versions: 1.2, 1.3) (no-tls1.2 !)
- sending capabilities command
- s> setsockopt(6, 1, 1) -> None (?)
- s> POST /api/exp-http-v2-0003/ro/capabilities HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> accept: application/mercurial-exp-framing-0006\r\n
- s> content-type: application/mercurial-exp-framing-0006\r\n
- s> content-length: 111\r\n (tls1.2 !)
- s> content-length: 102\r\n (no-tls1.2 !)
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> user-agent: Mercurial debugwireproto\r\n
- s> \r\n
- s> \x1c\x00\x00\x01\x00\x01\x01\x82\xa1Pcontentencodings\x81HidentityC\x00\x00\x01\x00\x01\x00\x11\xa2DnameLcapabilitiesHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81Htarget-a (tls1.2 !)
- s> \x1c\x00\x00\x01\x00\x01\x01\x82\xa1Pcontentencodings\x81Hidentity:\x00\x00\x01\x00\x01\x00\x11\xa2DnameLcapabilitiesHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x80 (no-tls1.2 !)
- s> makefile('rb', None)
- s> HTTP/1.1 200 OK\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Content-Type: application/mercurial-exp-framing-0006\r\n
- s> Transfer-Encoding: chunked\r\n
- s> \r\n
- s> 11\r\n
- s> \t\x00\x00\x01\x00\x02\x01\x92
- s> Hidentity
- s> \r\n
- s> 13\r\n
- s> \x0b\x00\x00\x01\x00\x02\x041
- s> \xa1FstatusBok
- s> \r\n
- s> 6de\r\n
- s> \xd6\x06\x00\x01\x00\x02\x041
- s> \xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoforma
ts\x83LgeneraldeltaHrevlogv1LsparserevlogHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa5DnameHtarget-aHprotocolDhttpKsnirequired\xf4Ktlsversions\x82C1.2C1.3Duris\x81Shttp://example.com/
- s> \r\n
- s> 8\r\n
- s> \x00\x00\x00\x01\x00\x02\x002
- s> \r\n
- s> 0\r\n
- s> \r\n
- response: gen[
- {
- b'commands': {
- b'branchmap': {
- b'args': {},
- b'permissions': [
- b'pull'
- ]
- },
- b'capabilities': {
- b'args': {},
- b'permissions': [
- b'pull'
- ]
- },
- b'changesetdata': {
- b'args': {
- b'fields': {
- b'default': set([]),
- b'required': False,
- b'type': b'set',
- b'validvalues': set([
- b'bookmarks',
- b'parents',
- b'phase',
- b'revision'
- ])
- },
- b'revisions': {
- b'required': True,
- b'type': b'list'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'filedata': {
- b'args': {
- b'fields': {
- b'default': set([]),
- b'required': False,
- b'type': b'set',
- b'validvalues': set([
- b'linknode',
- b'parents',
- b'revision'
- ])
- },
- b'haveparents': {
- b'default': False,
- b'required': False,
- b'type': b'bool'
- },
- b'nodes': {
- b'required': True,
- b'type': b'list'
- },
- b'path': {
- b'required': True,
- b'type': b'bytes'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'filesdata': {
- b'args': {
- b'fields': {
- b'default': set([]),
- b'required': False,
- b'type': b'set',
- b'validvalues': set([
- b'firstchangeset',
- b'linknode',
- b'parents',
- b'revision'
- ])
- },
- b'haveparents': {
- b'default': False,
- b'required': False,
- b'type': b'bool'
- },
- b'pathfilter': {
- b'default': None,
- b'required': False,
- b'type': b'dict'
- },
- b'revisions': {
- b'required': True,
- b'type': b'list'
- }
- },
- b'permissions': [
- b'pull'
- ],
- b'recommendedbatchsize': 50000
- },
- b'heads': {
- b'args': {
- b'publiconly': {
- b'default': False,
- b'required': False,
- b'type': b'bool'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'known': {
- b'args': {
- b'nodes': {
- b'default': [],
- b'required': False,
- b'type': b'list'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'listkeys': {
- b'args': {
- b'namespace': {
- b'required': True,
- b'type': b'bytes'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'lookup': {
- b'args': {
- b'key': {
- b'required': True,
- b'type': b'bytes'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'manifestdata': {
- b'args': {
- b'fields': {
- b'default': set([]),
- b'required': False,
- b'type': b'set',
- b'validvalues': set([
- b'parents',
- b'revision'
- ])
- },
- b'haveparents': {
- b'default': False,
- b'required': False,
- b'type': b'bool'
- },
- b'nodes': {
- b'required': True,
- b'type': b'list'
- },
- b'tree': {
- b'required': True,
- b'type': b'bytes'
- }
- },
- b'permissions': [
- b'pull'
- ],
- b'recommendedbatchsize': 100000
- },
- b'pushkey': {
- b'args': {
- b'key': {
- b'required': True,
- b'type': b'bytes'
- },
- b'namespace': {
- b'required': True,
- b'type': b'bytes'
- },
- b'new': {
- b'required': True,
- b'type': b'bytes'
- },
- b'old': {
- b'required': True,
- b'type': b'bytes'
- }
- },
- b'permissions': [
- b'push'
- ]
- },
- b'rawstorefiledata': {
- b'args': {
- b'files': {
- b'required': True,
- b'type': b'list'
- },
- b'pathfilter': {
- b'default': None,
- b'required': False,
- b'type': b'list'
- }
- },
- b'permissions': [
- b'pull'
- ]
- }
- },
- b'framingmediatypes': [
- b'application/mercurial-exp-framing-0006'
- ],
- b'pathfilterprefixes': set([
- b'path:',
- b'rootfilesin:'
- ]),
- b'rawrepoformats': [
- b'generaldelta',
- b'revlogv1',
- b'sparserevlog'
- ],
- b'redirect': {
- b'hashes': [
- b'sha256',
- b'sha1'
- ],
- b'targets': [
- {
- b'name': b'target-a',
- b'protocol': b'http',
- b'snirequired': False,
- b'tlsversions': [
- b'1.2',
- b'1.3'
- ],
- b'uris': [
- b'http://example.com/'
- ]
- }
- ]
- }
- }
- ]
- (sent 2 HTTP requests and * bytes; received * bytes in responses) (glob)
-
-Unknown protocol is filtered from compatible targets
-
- $ cat > redirects.py << EOF
- > [
- > {
- > b'name': b'target-a',
- > b'protocol': b'http',
- > b'uris': [b'http://example.com/'],
- > },
- > {
- > b'name': b'target-b',
- > b'protocol': b'unknown',
- > b'uris': [b'unknown://example.com/'],
- > },
- > ]
- > EOF
-
- $ sendhttpv2peerhandshake << EOF
- > command capabilities
- > EOF
- creating http peer for wire protocol version 2
- s> setsockopt(6, 1, 1) -> None (?)
- s> GET /?cmd=capabilities HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> vary: X-HgProto-1,X-HgUpgrade-1\r\n
- s> x-hgproto-1: cbor\r\n
- s> x-hgupgrade-1: exp-http-v2-0003\r\n
- s> accept: application/mercurial-0.1\r\n
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> user-agent: Mercurial debugwireproto\r\n
- s> \r\n
- s> makefile('rb', None)
- s> HTTP/1.1 200 OK\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Content-Type: application/mercurial-cbor\r\n
- s> Content-Length: 2316\r\n
- s> \r\n
- s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\
x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x82\xa3DnameHtarget-aHprotocolDhttpDuris\x81Shttp://example.com/\xa3DnameHtarget-bHprotocolGunknownDuris\x81Vunknown://example.com/Nv1capabilitiesY\x01\xe4batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
- (remote redirect target target-a is compatible)
- (remote redirect target target-b uses unsupported protocol: unknown)
- sending capabilities command
- s> setsockopt(6, 1, 1) -> None (?)
- s> POST /api/exp-http-v2-0003/ro/capabilities HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> accept: application/mercurial-exp-framing-0006\r\n
- s> content-type: application/mercurial-exp-framing-0006\r\n
- s> content-length: 111\r\n
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> user-agent: Mercurial debugwireproto\r\n
- s> \r\n
- s> \x1c\x00\x00\x01\x00\x01\x01\x82\xa1Pcontentencodings\x81HidentityC\x00\x00\x01\x00\x01\x00\x11\xa2DnameLcapabilitiesHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81Htarget-a
- s> makefile('rb', None)
- s> HTTP/1.1 200 OK\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Content-Type: application/mercurial-exp-framing-0006\r\n
- s> Transfer-Encoding: chunked\r\n
- s> \r\n
- s> 11\r\n
- s> \t\x00\x00\x01\x00\x02\x01\x92
- s> Hidentity
- s> \r\n
- s> 13\r\n
- s> \x0b\x00\x00\x01\x00\x02\x041
- s> \xa1FstatusBok
- s> \r\n
- s> 6f9\r\n
- s> \xf1\x06\x00\x01\x00\x02\x041
- s> \xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoforma
ts\x83LgeneraldeltaHrevlogv1LsparserevlogHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x82\xa3DnameHtarget-aHprotocolDhttpDuris\x81Shttp://example.com/\xa3DnameHtarget-bHprotocolGunknownDuris\x81Vunknown://example.com/
- s> \r\n
- s> 8\r\n
- s> \x00\x00\x00\x01\x00\x02\x002
- s> \r\n
- s> 0\r\n
- s> \r\n
- response: gen[
- {
- b'commands': {
- b'branchmap': {
- b'args': {},
- b'permissions': [
- b'pull'
- ]
- },
- b'capabilities': {
- b'args': {},
- b'permissions': [
- b'pull'
- ]
- },
- b'changesetdata': {
- b'args': {
- b'fields': {
- b'default': set([]),
- b'required': False,
- b'type': b'set',
- b'validvalues': set([
- b'bookmarks',
- b'parents',
- b'phase',
- b'revision'
- ])
- },
- b'revisions': {
- b'required': True,
- b'type': b'list'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'filedata': {
- b'args': {
- b'fields': {
- b'default': set([]),
- b'required': False,
- b'type': b'set',
- b'validvalues': set([
- b'linknode',
- b'parents',
- b'revision'
- ])
- },
- b'haveparents': {
- b'default': False,
- b'required': False,
- b'type': b'bool'
- },
- b'nodes': {
- b'required': True,
- b'type': b'list'
- },
- b'path': {
- b'required': True,
- b'type': b'bytes'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'filesdata': {
- b'args': {
- b'fields': {
- b'default': set([]),
- b'required': False,
- b'type': b'set',
- b'validvalues': set([
- b'firstchangeset',
- b'linknode',
- b'parents',
- b'revision'
- ])
- },
- b'haveparents': {
- b'default': False,
- b'required': False,
- b'type': b'bool'
- },
- b'pathfilter': {
- b'default': None,
- b'required': False,
- b'type': b'dict'
- },
- b'revisions': {
- b'required': True,
- b'type': b'list'
- }
- },
- b'permissions': [
- b'pull'
- ],
- b'recommendedbatchsize': 50000
- },
- b'heads': {
- b'args': {
- b'publiconly': {
- b'default': False,
- b'required': False,
- b'type': b'bool'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'known': {
- b'args': {
- b'nodes': {
- b'default': [],
- b'required': False,
- b'type': b'list'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'listkeys': {
- b'args': {
- b'namespace': {
- b'required': True,
- b'type': b'bytes'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'lookup': {
- b'args': {
- b'key': {
- b'required': True,
- b'type': b'bytes'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'manifestdata': {
- b'args': {
- b'fields': {
- b'default': set([]),
- b'required': False,
- b'type': b'set',
- b'validvalues': set([
- b'parents',
- b'revision'
- ])
- },
- b'haveparents': {
- b'default': False,
- b'required': False,
- b'type': b'bool'
- },
- b'nodes': {
- b'required': True,
- b'type': b'list'
- },
- b'tree': {
- b'required': True,
- b'type': b'bytes'
- }
- },
- b'permissions': [
- b'pull'
- ],
- b'recommendedbatchsize': 100000
- },
- b'pushkey': {
- b'args': {
- b'key': {
- b'required': True,
- b'type': b'bytes'
- },
- b'namespace': {
- b'required': True,
- b'type': b'bytes'
- },
- b'new': {
- b'required': True,
- b'type': b'bytes'
- },
- b'old': {
- b'required': True,
- b'type': b'bytes'
- }
- },
- b'permissions': [
- b'push'
- ]
- },
- b'rawstorefiledata': {
- b'args': {
- b'files': {
- b'required': True,
- b'type': b'list'
- },
- b'pathfilter': {
- b'default': None,
- b'required': False,
- b'type': b'list'
- }
- },
- b'permissions': [
- b'pull'
- ]
- }
- },
- b'framingmediatypes': [
- b'application/mercurial-exp-framing-0006'
- ],
- b'pathfilterprefixes': set([
- b'path:',
- b'rootfilesin:'
- ]),
- b'rawrepoformats': [
- b'generaldelta',
- b'revlogv1',
- b'sparserevlog'
- ],
- b'redirect': {
- b'hashes': [
- b'sha256',
- b'sha1'
- ],
- b'targets': [
- {
- b'name': b'target-a',
- b'protocol': b'http',
- b'uris': [
- b'http://example.com/'
- ]
- },
- {
- b'name': b'target-b',
- b'protocol': b'unknown',
- b'uris': [
- b'unknown://example.com/'
- ]
- }
- ]
- }
- }
- ]
- (sent 2 HTTP requests and * bytes; received * bytes in responses) (glob)
-
-Missing SNI support filters targets that require SNI
-
- $ cat > nosni.py << EOF
- > from mercurial import sslutil
- > sslutil.hassni = False
- > EOF
- $ cat >> $HGRCPATH << EOF
- > [extensions]
- > nosni=`pwd`/nosni.py
- > EOF
-
- $ cat > redirects.py << EOF
- > [
- > {
- > b'name': b'target-bad-tls',
- > b'protocol': b'https',
- > b'uris': [b'https://example.com/'],
- > b'snirequired': True,
- > },
- > ]
- > EOF
-
- $ sendhttpv2peerhandshake << EOF
- > command capabilities
- > EOF
- creating http peer for wire protocol version 2
- s> setsockopt(6, 1, 1) -> None (?)
- s> GET /?cmd=capabilities HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> vary: X-HgProto-1,X-HgUpgrade-1\r\n
- s> x-hgproto-1: cbor\r\n
- s> x-hgupgrade-1: exp-http-v2-0003\r\n
- s> accept: application/mercurial-0.1\r\n
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> user-agent: Mercurial debugwireproto\r\n
- s> \r\n
- s> makefile('rb', None)
- s> HTTP/1.1 200 OK\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Content-Type: application/mercurial-cbor\r\n
- s> Content-Length: 2276\r\n
- s> \r\n
- s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\
x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa4DnameNtarget-bad-tlsHprotocolEhttpsKsnirequired\xf5Duris\x81Thttps://example.com/Nv1capabilitiesY\x01\xe4batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
- (redirect target target-bad-tls requires SNI, which is unsupported)
- sending capabilities command
- s> setsockopt(6, 1, 1) -> None (?)
- s> POST /api/exp-http-v2-0003/ro/capabilities HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> accept: application/mercurial-exp-framing-0006\r\n
- s> content-type: application/mercurial-exp-framing-0006\r\n
- s> content-length: 102\r\n
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> user-agent: Mercurial debugwireproto\r\n
- s> \r\n
- s> \x1c\x00\x00\x01\x00\x01\x01\x82\xa1Pcontentencodings\x81Hidentity:\x00\x00\x01\x00\x01\x00\x11\xa2DnameLcapabilitiesHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x80
- s> makefile('rb', None)
- s> HTTP/1.1 200 OK\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Content-Type: application/mercurial-exp-framing-0006\r\n
- s> Transfer-Encoding: chunked\r\n
- s> \r\n
- s> 11\r\n
- s> \t\x00\x00\x01\x00\x02\x01\x92
- s> Hidentity
- s> \r\n
- s> 13\r\n
- s> \x0b\x00\x00\x01\x00\x02\x041
- s> \xa1FstatusBok
- s> \r\n
- s> 6d1\r\n
- s> \xc9\x06\x00\x01\x00\x02\x041
- s> \xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoforma
ts\x83LgeneraldeltaHrevlogv1LsparserevlogHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa4DnameNtarget-bad-tlsHprotocolEhttpsKsnirequired\xf5Duris\x81Thttps://example.com/
- s> \r\n
- s> 8\r\n
- s> \x00\x00\x00\x01\x00\x02\x002
- s> \r\n
- s> 0\r\n
- s> \r\n
- response: gen[
- {
- b'commands': {
- b'branchmap': {
- b'args': {},
- b'permissions': [
- b'pull'
- ]
- },
- b'capabilities': {
- b'args': {},
- b'permissions': [
- b'pull'
- ]
- },
- b'changesetdata': {
- b'args': {
- b'fields': {
- b'default': set([]),
- b'required': False,
- b'type': b'set',
- b'validvalues': set([
- b'bookmarks',
- b'parents',
- b'phase',
- b'revision'
- ])
- },
- b'revisions': {
- b'required': True,
- b'type': b'list'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'filedata': {
- b'args': {
- b'fields': {
- b'default': set([]),
- b'required': False,
- b'type': b'set',
- b'validvalues': set([
- b'linknode',
- b'parents',
- b'revision'
- ])
- },
- b'haveparents': {
- b'default': False,
- b'required': False,
- b'type': b'bool'
- },
- b'nodes': {
- b'required': True,
- b'type': b'list'
- },
- b'path': {
- b'required': True,
- b'type': b'bytes'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'filesdata': {
- b'args': {
- b'fields': {
- b'default': set([]),
- b'required': False,
- b'type': b'set',
- b'validvalues': set([
- b'firstchangeset',
- b'linknode',
- b'parents',
- b'revision'
- ])
- },
- b'haveparents': {
- b'default': False,
- b'required': False,
- b'type': b'bool'
- },
- b'pathfilter': {
- b'default': None,
- b'required': False,
- b'type': b'dict'
- },
- b'revisions': {
- b'required': True,
- b'type': b'list'
- }
- },
- b'permissions': [
- b'pull'
- ],
- b'recommendedbatchsize': 50000
- },
- b'heads': {
- b'args': {
- b'publiconly': {
- b'default': False,
- b'required': False,
- b'type': b'bool'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'known': {
- b'args': {
- b'nodes': {
- b'default': [],
- b'required': False,
- b'type': b'list'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'listkeys': {
- b'args': {
- b'namespace': {
- b'required': True,
- b'type': b'bytes'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'lookup': {
- b'args': {
- b'key': {
- b'required': True,
- b'type': b'bytes'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'manifestdata': {
- b'args': {
- b'fields': {
- b'default': set([]),
- b'required': False,
- b'type': b'set',
- b'validvalues': set([
- b'parents',
- b'revision'
- ])
- },
- b'haveparents': {
- b'default': False,
- b'required': False,
- b'type': b'bool'
- },
- b'nodes': {
- b'required': True,
- b'type': b'list'
- },
- b'tree': {
- b'required': True,
- b'type': b'bytes'
- }
- },
- b'permissions': [
- b'pull'
- ],
- b'recommendedbatchsize': 100000
- },
- b'pushkey': {
- b'args': {
- b'key': {
- b'required': True,
- b'type': b'bytes'
- },
- b'namespace': {
- b'required': True,
- b'type': b'bytes'
- },
- b'new': {
- b'required': True,
- b'type': b'bytes'
- },
- b'old': {
- b'required': True,
- b'type': b'bytes'
- }
- },
- b'permissions': [
- b'push'
- ]
- },
- b'rawstorefiledata': {
- b'args': {
- b'files': {
- b'required': True,
- b'type': b'list'
- },
- b'pathfilter': {
- b'default': None,
- b'required': False,
- b'type': b'list'
- }
- },
- b'permissions': [
- b'pull'
- ]
- }
- },
- b'framingmediatypes': [
- b'application/mercurial-exp-framing-0006'
- ],
- b'pathfilterprefixes': set([
- b'path:',
- b'rootfilesin:'
- ]),
- b'rawrepoformats': [
- b'generaldelta',
- b'revlogv1',
- b'sparserevlog'
- ],
- b'redirect': {
- b'hashes': [
- b'sha256',
- b'sha1'
- ],
- b'targets': [
- {
- b'name': b'target-bad-tls',
- b'protocol': b'https',
- b'snirequired': True,
- b'uris': [
- b'https://example.com/'
- ]
- }
- ]
- }
- }
- ]
- (sent 2 HTTP requests and * bytes; received * bytes in responses) (glob)
-
- $ cat >> $HGRCPATH << EOF
- > [extensions]
- > nosni=!
- > EOF
-
-Unknown tls value is filtered from compatible targets
-
- $ cat > redirects.py << EOF
- > [
- > {
- > b'name': b'target-bad-tls',
- > b'protocol': b'https',
- > b'uris': [b'https://example.com/'],
- > b'tlsversions': [b'42', b'39'],
- > },
- > ]
- > EOF
-
- $ sendhttpv2peerhandshake << EOF
- > command capabilities
- > EOF
- creating http peer for wire protocol version 2
- s> setsockopt(6, 1, 1) -> None (?)
- s> GET /?cmd=capabilities HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> vary: X-HgProto-1,X-HgUpgrade-1\r\n
- s> x-hgproto-1: cbor\r\n
- s> x-hgupgrade-1: exp-http-v2-0003\r\n
- s> accept: application/mercurial-0.1\r\n
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> user-agent: Mercurial debugwireproto\r\n
- s> \r\n
- s> makefile('rb', None)
- s> HTTP/1.1 200 OK\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Content-Type: application/mercurial-cbor\r\n
- s> Content-Length: 2282\r\n
- s> \r\n
- s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\
x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa4DnameNtarget-bad-tlsHprotocolEhttpsKtlsversions\x82B42B39Duris\x81Thttps://example.com/Nv1capabilitiesY\x01\xe4batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
- (remote redirect target target-bad-tls requires unsupported TLS versions: 39, 42)
- sending capabilities command
- s> setsockopt(6, 1, 1) -> None (?)
- s> POST /api/exp-http-v2-0003/ro/capabilities HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> accept: application/mercurial-exp-framing-0006\r\n
- s> content-type: application/mercurial-exp-framing-0006\r\n
- s> content-length: 102\r\n
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> user-agent: Mercurial debugwireproto\r\n
- s> \r\n
- s> \x1c\x00\x00\x01\x00\x01\x01\x82\xa1Pcontentencodings\x81Hidentity:\x00\x00\x01\x00\x01\x00\x11\xa2DnameLcapabilitiesHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x80
- s> makefile('rb', None)
- s> HTTP/1.1 200 OK\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Content-Type: application/mercurial-exp-framing-0006\r\n
- s> Transfer-Encoding: chunked\r\n
- s> \r\n
- s> 11\r\n
- s> \t\x00\x00\x01\x00\x02\x01\x92
- s> Hidentity
- s> \r\n
- s> 13\r\n
- s> \x0b\x00\x00\x01\x00\x02\x041
- s> \xa1FstatusBok
- s> \r\n
- s> 6d7\r\n
- s> \xcf\x06\x00\x01\x00\x02\x041
- s> \xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoforma
ts\x83LgeneraldeltaHrevlogv1LsparserevlogHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa4DnameNtarget-bad-tlsHprotocolEhttpsKtlsversions\x82B42B39Duris\x81Thttps://example.com/
- s> \r\n
- s> 8\r\n
- s> \x00\x00\x00\x01\x00\x02\x002
- s> \r\n
- s> 0\r\n
- s> \r\n
- response: gen[
- {
- b'commands': {
- b'branchmap': {
- b'args': {},
- b'permissions': [
- b'pull'
- ]
- },
- b'capabilities': {
- b'args': {},
- b'permissions': [
- b'pull'
- ]
- },
- b'changesetdata': {
- b'args': {
- b'fields': {
- b'default': set([]),
- b'required': False,
- b'type': b'set',
- b'validvalues': set([
- b'bookmarks',
- b'parents',
- b'phase',
- b'revision'
- ])
- },
- b'revisions': {
- b'required': True,
- b'type': b'list'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'filedata': {
- b'args': {
- b'fields': {
- b'default': set([]),
- b'required': False,
- b'type': b'set',
- b'validvalues': set([
- b'linknode',
- b'parents',
- b'revision'
- ])
- },
- b'haveparents': {
- b'default': False,
- b'required': False,
- b'type': b'bool'
- },
- b'nodes': {
- b'required': True,
- b'type': b'list'
- },
- b'path': {
- b'required': True,
- b'type': b'bytes'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'filesdata': {
- b'args': {
- b'fields': {
- b'default': set([]),
- b'required': False,
- b'type': b'set',
- b'validvalues': set([
- b'firstchangeset',
- b'linknode',
- b'parents',
- b'revision'
- ])
- },
- b'haveparents': {
- b'default': False,
- b'required': False,
- b'type': b'bool'
- },
- b'pathfilter': {
- b'default': None,
- b'required': False,
- b'type': b'dict'
- },
- b'revisions': {
- b'required': True,
- b'type': b'list'
- }
- },
- b'permissions': [
- b'pull'
- ],
- b'recommendedbatchsize': 50000
- },
- b'heads': {
- b'args': {
- b'publiconly': {
- b'default': False,
- b'required': False,
- b'type': b'bool'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'known': {
- b'args': {
- b'nodes': {
- b'default': [],
- b'required': False,
- b'type': b'list'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'listkeys': {
- b'args': {
- b'namespace': {
- b'required': True,
- b'type': b'bytes'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'lookup': {
- b'args': {
- b'key': {
- b'required': True,
- b'type': b'bytes'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'manifestdata': {
- b'args': {
- b'fields': {
- b'default': set([]),
- b'required': False,
- b'type': b'set',
- b'validvalues': set([
- b'parents',
- b'revision'
- ])
- },
- b'haveparents': {
- b'default': False,
- b'required': False,
- b'type': b'bool'
- },
- b'nodes': {
- b'required': True,
- b'type': b'list'
- },
- b'tree': {
- b'required': True,
- b'type': b'bytes'
- }
- },
- b'permissions': [
- b'pull'
- ],
- b'recommendedbatchsize': 100000
- },
- b'pushkey': {
- b'args': {
- b'key': {
- b'required': True,
- b'type': b'bytes'
- },
- b'namespace': {
- b'required': True,
- b'type': b'bytes'
- },
- b'new': {
- b'required': True,
- b'type': b'bytes'
- },
- b'old': {
- b'required': True,
- b'type': b'bytes'
- }
- },
- b'permissions': [
- b'push'
- ]
- },
- b'rawstorefiledata': {
- b'args': {
- b'files': {
- b'required': True,
- b'type': b'list'
- },
- b'pathfilter': {
- b'default': None,
- b'required': False,
- b'type': b'list'
- }
- },
- b'permissions': [
- b'pull'
- ]
- }
- },
- b'framingmediatypes': [
- b'application/mercurial-exp-framing-0006'
- ],
- b'pathfilterprefixes': set([
- b'path:',
- b'rootfilesin:'
- ]),
- b'rawrepoformats': [
- b'generaldelta',
- b'revlogv1',
- b'sparserevlog'
- ],
- b'redirect': {
- b'hashes': [
- b'sha256',
- b'sha1'
- ],
- b'targets': [
- {
- b'name': b'target-bad-tls',
- b'protocol': b'https',
- b'tlsversions': [
- b'42',
- b'39'
- ],
- b'uris': [
- b'https://example.com/'
- ]
- }
- ]
- }
- }
- ]
- (sent 2 HTTP requests and * bytes; received * bytes in responses) (glob)
-
-Set up the server to issue content redirects to its built-in API server.
-
- $ cat > redirects.py << EOF
- > [
- > {
- > b'name': b'local',
- > b'protocol': b'http',
- > b'uris': [b'http://example.com/'],
- > },
- > ]
- > EOF
-
-Request to eventual cache URL should return 404 (validating the cache server works)
-
- $ sendhttpraw << EOF
- > httprequest GET api/simplecache/missingkey
- > user-agent: test
- > EOF
- using raw connection to peer
- s> setsockopt(6, 1, 1) -> None (?)
- s> GET /api/simplecache/missingkey HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> user-agent: test\r\n
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> \r\n
- s> makefile('rb', None)
- s> HTTP/1.1 404 Not Found\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Content-Type: text/plain\r\n
- s> Content-Length: 22\r\n
- s> \r\n
- s> key not found in cache
-
-Send a cacheable request
-
- $ sendhttpv2peer << EOF
- > command manifestdata
- > nodes eval:[b'\x99\x2f\x47\x79\x02\x9a\x3d\xf8\xd0\x66\x6d\x00\xbb\x92\x4f\x69\x63\x4e\x26\x41']
- > tree eval:b''
- > fields eval:[b'parents']
- > EOF
- creating http peer for wire protocol version 2
- sending manifestdata command
- response: gen[
- {
- b'totalitems': 1
- },
- {
- b'node': b'\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A',
- b'parents': [
- b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
- b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
- ]
- }
- ]
-
-Cached entry should be available on server
-
- $ sendhttpraw << EOF
- > httprequest GET api/simplecache/47abb8efa5f01b8964d74917793ad2464db0fa2c
- > user-agent: test
- > EOF
- using raw connection to peer
- s> setsockopt(6, 1, 1) -> None (?)
- s> GET /api/simplecache/47abb8efa5f01b8964d74917793ad2464db0fa2c HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> user-agent: test\r\n
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> \r\n
- s> makefile('rb', None)
- s> HTTP/1.1 200 OK\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Content-Type: application/mercurial-cbor\r\n
- s> Content-Length: 91\r\n
- s> \r\n
- s> \xa1Jtotalitems\x01\xa2DnodeT\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&AGparents\x82T\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00T\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00
- cbor> [
- {
- b'totalitems': 1
- },
- {
- b'node': b'\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A',
- b'parents': [
- b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
- b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
- ]
- }
- ]
-
-2nd request should result in content redirect response
-
- $ sendhttpv2peer << EOF
- > command manifestdata
- > nodes eval:[b'\x99\x2f\x47\x79\x02\x9a\x3d\xf8\xd0\x66\x6d\x00\xbb\x92\x4f\x69\x63\x4e\x26\x41']
- > tree eval:b''
- > fields eval:[b'parents']
- > EOF
- creating http peer for wire protocol version 2
- sending manifestdata command
- response: gen[
- {
- b'totalitems': 1
- },
- {
- b'node': b'\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A',
- b'parents': [
- b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
- b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
- ]
- }
- ]
-
- $ cat error.log
- $ killdaemons.py
-
- $ cat .hg/blackbox.log
- *> cacher constructed for manifestdata (glob)
- *> cache miss for 47abb8efa5f01b8964d74917793ad2464db0fa2c (glob)
- *> storing cache entry for 47abb8efa5f01b8964d74917793ad2464db0fa2c (glob)
- *> cacher constructed for manifestdata (glob)
- *> cache hit for 47abb8efa5f01b8964d74917793ad2464db0fa2c (glob)
- *> sending content redirect for 47abb8efa5f01b8964d74917793ad2464db0fa2c to http://*:$HGPORT/api/simplecache/47abb8efa5f01b8964d74917793ad2464db0fa2c (glob)
--- a/tests/test-wireproto-exchangev2-shallow.t Fri Feb 18 12:55:39 2022 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,597 +0,0 @@
-#require sqlite
-
-Tests for wire protocol version 2 exchange.
-Tests in this file should be folded into existing tests once protocol
-v2 has enough features that it can be enabled via #testcase in existing
-tests.
-
- $ . $TESTDIR/wireprotohelpers.sh
- $ enablehttpv2client
- $ cat >> $HGRCPATH << EOF
- > [extensions]
- > sqlitestore =
- > pullext = $TESTDIR/pullext.py
- > [storage]
- > new-repo-backend=sqlite
- > EOF
-
-Configure a server
-
- $ hg init server-basic
- $ enablehttpv2 server-basic
- $ cd server-basic
- $ mkdir dir0 dir1
- $ echo a0 > a
- $ echo b0 > b
- $ hg -q commit -A -m 'commit 0'
- $ echo c0 > dir0/c
- $ echo d0 > dir0/d
- $ hg -q commit -A -m 'commit 1'
- $ echo e0 > dir1/e
- $ echo f0 > dir1/f
- $ hg -q commit -A -m 'commit 2'
- $ echo c1 > dir0/c
- $ echo e1 > dir1/e
- $ hg commit -m 'commit 3'
- $ echo c2 > dir0/c
- $ echo e2 > dir1/e
- $ echo f1 > dir1/f
- $ hg commit -m 'commit 4'
- $ echo a1 > a
- $ echo b1 > b
- $ hg commit -m 'commit 5'
-
- $ hg log -G -T '{node} {desc}'
- @ 93a8bd067ed2840d9aa810ad598168383a3a2c3a commit 5
- |
- o dc666cf9ecf3d94e6b830f30e5f1272e2a9164d9 commit 4
- |
- o 97765fc3cd624fd1fa0176932c21ffd16adf432e commit 3
- |
- o 47fe012ab237a8c7fc0c78f9f26d5866eef3f825 commit 2
- |
- o b709380892b193c1091d3a817f706052e346821b commit 1
- |
- o 3390ef850073fbc2f0dfff2244342c8e9229013a commit 0
-
- $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log
- $ cat hg.pid > $DAEMON_PIDS
-
- $ cd ..
-
-Shallow clone pulls down latest revision of every file
-
- $ hg --debug clone --depth 1 http://localhost:$HGPORT client-shallow-1
- using http://localhost:$HGPORT/
- sending capabilities command
- query 1; heads
- sending 2 commands
- sending command heads: {}
- sending command known: {
- 'nodes': []
- }
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=22; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
- received frame(size=11; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=1; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos)
- sending 1 commands
- sending command changesetdata: {
- 'fields': set([
- 'bookmarks',
- 'parents',
- 'phase',
- 'revision'
- ]),
- 'revisions': [
- {
- 'heads': [
- '\x93\xa8\xbd\x06~\xd2\x84\r\x9a\xa8\x10\xadY\x81h8::,:'
- ],
- 'roots': [],
- 'type': 'changesetdagrange'
- }
- ]
- }
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=1170; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) (?)
- add changeset 3390ef850073
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) (?)
- add changeset b709380892b1
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) (?)
- add changeset 47fe012ab237
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) (?)
- add changeset 97765fc3cd62
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) (?)
- add changeset dc666cf9ecf3
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) (?)
- add changeset 93a8bd067ed2
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) (?)
- checking for updated bookmarks
- sending 1 commands
- sending command manifestdata: {
- 'fields': set([
- 'parents',
- 'revision'
- ]),
- 'haveparents': True,
- 'nodes': [
- '\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A',
- '|2 \x1a\xa3\xa1R\xa9\xe6\xa9"+?\xa8\xd0\xe3\x0f\xc2V\xe8',
- '\x8d\xd0W<\x7f\xaf\xe2\x04F\xcc\xea\xac\x05N\xea\xa4x\x91M\xdb',
- '113\x85\xf2!\x8b\x08^\xb2Z\x821\x1e*\xdd\x0e\xeb\x8c3',
- 'H]O\xc2`\xef\\\xb9\xc0p6\x88K\x00k\x11\x0ej\xdby',
- '\xd9;\xc4\x0b\x0e*GMp\xee\xf7}^\x91/f\x7fSd\x83'
- ],
- 'tree': ''
- }
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=1515; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
- sending 1 commands
- sending command filesdata: {
- 'fields': set([
- 'linknode',
- 'parents',
- 'revision'
- ]),
- 'haveparents': False,
- 'revisions': [
- {
- 'nodes': [
- '\x93\xa8\xbd\x06~\xd2\x84\r\x9a\xa8\x10\xadY\x81h8::,:'
- ],
- 'type': 'changesetexplicit'
- }
- ]
- }
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=1005; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
- updating the branch cache
- new changesets 3390ef850073:93a8bd067ed2
- updating to branch default
- resolving manifests
- branchmerge: False, force: False, partial: False
- ancestor: 000000000000, local: 000000000000+, remote: 93a8bd067ed2
- a: remote created -> g
- getting a
- b: remote created -> g
- getting b
- dir0/c: remote created -> g
- getting dir0/c
- dir0/d: remote created -> g
- getting dir0/d
- dir1/e: remote created -> g
- getting dir1/e
- dir1/f: remote created -> g
- getting dir1/f
- 6 files updated, 0 files merged, 0 files removed, 0 files unresolved
- updating the branch cache
- (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob)
-
-#if chg
- $ hg --kill-chg-daemon
- $ sleep 2
-#endif
- $ sqlite3 -line client-shallow-1/.hg/store/db.sqlite << EOF
- > SELECT id, path, revnum, node, p1rev, p2rev, linkrev, flags FROM filedata ORDER BY id ASC;
- > EOF
- id = 1
- path = a
- revnum = 0
- node = \x9a8\x12)\x97\xb3\xac\x97\xbe*\x9a\xa2\xe5V\x83\x83A\xfd\xf2\xcc (esc)
- p1rev = -1
- p2rev = -1
- linkrev = 5
- flags = 2
-
- id = 2
- path = b
- revnum = 0
- node = \xb1zk\xd3g=\x9a\xb8\xce\xd5\x81\xa2 \xf6/=\xa5\xccEx (esc)
- p1rev = -1
- p2rev = -1
- linkrev = 5
- flags = 2
-
- id = 3
- path = dir0/c
- revnum = 0
- node = I\x1d\xa1\xbb\x89\xeax\xc0\xc0\xa2s[\x16\xce}\x93\x1d\xc8\xe2\r (esc)
- p1rev = -1
- p2rev = -1
- linkrev = 5
- flags = 2
-
- id = 4
- path = dir0/d
- revnum = 0
- node = S\x82\x06\xdc\x97\x1eR\x15@\xd6\x84:\xbf\xe6\xd1`2\xf6\xd4& (esc)
- p1rev = -1
- p2rev = -1
- linkrev = 5
- flags = 0
-
- id = 5
- path = dir1/e
- revnum = 0
- node = ]\xf3\xac\xd8\xd0\xc7\xfaP\x98\xd0'\x9a\x044\xc3\x02\x9e+x\xe1 (esc)
- p1rev = -1
- p2rev = -1
- linkrev = 5
- flags = 2
-
- id = 6
- path = dir1/f
- revnum = 0
- node = (\xc7v\xae\x08\xd0\xd5^\xb4\x06H\xb4\x01\xb9\x0f\xf5DH4\x8e (esc)
- p1rev = -1
- p2rev = -1
- linkrev = 5
- flags = 2
-
-Test a shallow clone with only some files
-
- $ hg --debug clone --depth 1 --include dir0/ http://localhost:$HGPORT client-shallow-narrow-1
- using http://localhost:$HGPORT/
- sending capabilities command
- query 1; heads
- sending 2 commands
- sending command heads: {}
- sending command known: {
- 'nodes': []
- }
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=22; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
- received frame(size=11; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=1; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos)
- sending 1 commands
- sending command changesetdata: {
- 'fields': set([
- 'bookmarks',
- 'parents',
- 'phase',
- 'revision'
- ]),
- 'revisions': [
- {
- 'heads': [
- '\x93\xa8\xbd\x06~\xd2\x84\r\x9a\xa8\x10\xadY\x81h8::,:'
- ],
- 'roots': [],
- 'type': 'changesetdagrange'
- }
- ]
- }
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=1170; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) (?)
- add changeset 3390ef850073
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) (?)
- add changeset b709380892b1
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) (?)
- add changeset 47fe012ab237
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) (?)
- add changeset 97765fc3cd62
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) (?)
- add changeset dc666cf9ecf3
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) (?)
- add changeset 93a8bd067ed2
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) (?)
- checking for updated bookmarks
- sending 1 commands
- sending command manifestdata: {
- 'fields': set([
- 'parents',
- 'revision'
- ]),
- 'haveparents': True,
- 'nodes': [
- '\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A',
- '|2 \x1a\xa3\xa1R\xa9\xe6\xa9"+?\xa8\xd0\xe3\x0f\xc2V\xe8',
- '\x8d\xd0W<\x7f\xaf\xe2\x04F\xcc\xea\xac\x05N\xea\xa4x\x91M\xdb',
- '113\x85\xf2!\x8b\x08^\xb2Z\x821\x1e*\xdd\x0e\xeb\x8c3',
- 'H]O\xc2`\xef\\\xb9\xc0p6\x88K\x00k\x11\x0ej\xdby',
- '\xd9;\xc4\x0b\x0e*GMp\xee\xf7}^\x91/f\x7fSd\x83'
- ],
- 'tree': ''
- }
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=1515; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
- sending 1 commands
- sending command filesdata: {
- 'fields': set([
- 'linknode',
- 'parents',
- 'revision'
- ]),
- 'haveparents': False,
- 'pathfilter': {
- 'include': [
- 'path:dir0'
- ]
- },
- 'revisions': [
- {
- 'nodes': [
- '\x93\xa8\xbd\x06~\xd2\x84\r\x9a\xa8\x10\xadY\x81h8::,:'
- ],
- 'type': 'changesetexplicit'
- }
- ]
- }
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=355; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
- updating the branch cache
- new changesets 3390ef850073:93a8bd067ed2
- updating to branch default
- resolving manifests
- branchmerge: False, force: False, partial: False
- ancestor: 000000000000, local: 000000000000+, remote: 93a8bd067ed2
- dir0/c: remote created -> g
- getting dir0/c
- dir0/d: remote created -> g
- getting dir0/d
- 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
- updating the branch cache
- (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob)
-
-#if chg
- $ hg --kill-chg-daemon
- $ sleep 2
-#endif
- $ sqlite3 -line client-shallow-narrow-1/.hg/store/db.sqlite << EOF
- > SELECT id, path, revnum, node, p1rev, p2rev, linkrev, flags FROM filedata ORDER BY id ASC;
- > EOF
- id = 1
- path = dir0/c
- revnum = 0
- node = I\x1d\xa1\xbb\x89\xeax\xc0\xc0\xa2s[\x16\xce}\x93\x1d\xc8\xe2\r (esc)
- p1rev = -1
- p2rev = -1
- linkrev = 5
- flags = 2
-
- id = 2
- path = dir0/d
- revnum = 0
- node = S\x82\x06\xdc\x97\x1eR\x15@\xd6\x84:\xbf\xe6\xd1`2\xf6\xd4& (esc)
- p1rev = -1
- p2rev = -1
- linkrev = 5
- flags = 0
-
-Cloning an old revision with depth=1 works
-
- $ hg --debug clone --depth 1 -r 97765fc3cd624fd1fa0176932c21ffd16adf432e http://localhost:$HGPORT client-shallow-2
- using http://localhost:$HGPORT/
- sending capabilities command
- sending 1 commands
- sending command lookup: {
- 'key': '97765fc3cd624fd1fa0176932c21ffd16adf432e'
- }
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=21; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
- query 1; heads
- sending 2 commands
- sending command heads: {}
- sending command known: {
- 'nodes': []
- }
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=22; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
- received frame(size=11; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=1; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos)
- sending 1 commands
- sending command changesetdata: {
- 'fields': set([
- 'bookmarks',
- 'parents',
- 'phase',
- 'revision'
- ]),
- 'revisions': [
- {
- 'heads': [
- '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.'
- ],
- 'roots': [],
- 'type': 'changesetdagrange'
- }
- ]
- }
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=783; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) (?)
- add changeset 3390ef850073
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) (?)
- add changeset b709380892b1
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) (?)
- add changeset 47fe012ab237
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) (?)
- add changeset 97765fc3cd62
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) (?)
- checking for updated bookmarks
- sending 1 commands
- sending command manifestdata: {
- 'fields': set([
- 'parents',
- 'revision'
- ]),
- 'haveparents': True,
- 'nodes': [
- '\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A',
- '|2 \x1a\xa3\xa1R\xa9\xe6\xa9"+?\xa8\xd0\xe3\x0f\xc2V\xe8',
- '\x8d\xd0W<\x7f\xaf\xe2\x04F\xcc\xea\xac\x05N\xea\xa4x\x91M\xdb',
- '113\x85\xf2!\x8b\x08^\xb2Z\x821\x1e*\xdd\x0e\xeb\x8c3'
- ],
- 'tree': ''
- }
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=967; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
- sending 1 commands
- sending command filesdata: {
- 'fields': set([
- 'linknode',
- 'parents',
- 'revision'
- ]),
- 'haveparents': False,
- 'revisions': [
- {
- 'nodes': [
- '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.'
- ],
- 'type': 'changesetexplicit'
- }
- ]
- }
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=1005; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
- updating the branch cache
- new changesets 3390ef850073:97765fc3cd62
- updating to branch default
- resolving manifests
- branchmerge: False, force: False, partial: False
- ancestor: 000000000000, local: 000000000000+, remote: 97765fc3cd62
- a: remote created -> g
- getting a
- b: remote created -> g
- getting b
- dir0/c: remote created -> g
- getting dir0/c
- dir0/d: remote created -> g
- getting dir0/d
- dir1/e: remote created -> g
- getting dir1/e
- dir1/f: remote created -> g
- getting dir1/f
- 6 files updated, 0 files merged, 0 files removed, 0 files unresolved
- updating the branch cache
- (sent 6 HTTP requests and * bytes; received * bytes in responses) (glob)
-
-Incremental pull of shallow clone fetches new changesets
-
- $ hg --cwd client-shallow-2 --debug pull http://localhost:$HGPORT
- pulling from http://localhost:$HGPORT/
- using http://localhost:$HGPORT/
- sending capabilities command
- query 1; heads
- sending 2 commands
- sending command heads: {}
- sending command known: {
- 'nodes': [
- '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.'
- ]
- }
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=22; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
- received frame(size=11; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=2; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos)
- searching for changes
- all local changesets known remotely
- sending 1 commands
- sending command changesetdata: {
- 'fields': set([
- 'bookmarks',
- 'parents',
- 'phase',
- 'revision'
- ]),
- 'revisions': [
- {
- 'heads': [
- '\x93\xa8\xbd\x06~\xd2\x84\r\x9a\xa8\x10\xadY\x81h8::,:'
- ],
- 'roots': [
- '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.'
- ],
- 'type': 'changesetdagrange'
- }
- ]
- }
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=400; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) (?)
- add changeset dc666cf9ecf3
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) (?)
- add changeset 93a8bd067ed2
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) (?)
- checking for updated bookmarks
- sending 1 commands
- sending command manifestdata: {
- 'fields': set([
- 'parents',
- 'revision'
- ]),
- 'haveparents': True,
- 'nodes': [
- 'H]O\xc2`\xef\\\xb9\xc0p6\x88K\x00k\x11\x0ej\xdby',
- '\xd9;\xc4\x0b\x0e*GMp\xee\xf7}^\x91/f\x7fSd\x83'
- ],
- 'tree': ''
- }
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=561; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
- sending 1 commands
- sending command filesdata: {
- 'fields': set([
- 'linknode',
- 'parents',
- 'revision'
- ]),
- 'haveparents': False,
- 'revisions': [
- {
- 'nodes': [
- '\xdcfl\xf9\xec\xf3\xd9Nk\x83\x0f0\xe5\xf1\'.*\x91d\xd9',
- '\x93\xa8\xbd\x06~\xd2\x84\r\x9a\xa8\x10\xadY\x81h8::,:'
- ],
- 'type': 'changesetexplicit'
- }
- ]
- }
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=1373; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
- updating the branch cache
- new changesets dc666cf9ecf3:93a8bd067ed2
- (run 'hg update' to get a working copy)
- (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob)
-
- $ hg --cwd client-shallow-2 up tip
- merging dir0/c
- merging dir1/e
- 3 files updated, 2 files merged, 0 files removed, 0 files unresolved
--- a/tests/test-wireproto-exchangev2.t Fri Feb 18 12:55:39 2022 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,1392 +0,0 @@
-Tests for wire protocol version 2 exchange.
-Tests in this file should be folded into existing tests once protocol
-v2 has enough features that it can be enabled via #testcase in existing
-tests.
-
- $ . $TESTDIR/wireprotohelpers.sh
- $ enablehttpv2client
-
- $ hg init server-simple
- $ enablehttpv2 server-simple
- $ cd server-simple
- $ cat >> .hg/hgrc << EOF
- > [phases]
- > publish = false
- > EOF
- $ echo a0 > a
- $ echo b0 > b
- $ hg -q commit -A -m 'commit 0'
-
- $ echo a1 > a
- $ hg commit -m 'commit 1'
- $ hg phase --public -r .
- $ echo a2 > a
- $ hg commit -m 'commit 2'
-
- $ hg -q up -r 0
- $ echo b1 > b
- $ hg -q commit -m 'head 2 commit 1'
- $ echo b2 > b
- $ hg -q commit -m 'head 2 commit 2'
-
- $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log
- $ cat hg.pid > $DAEMON_PIDS
-
- $ cd ..
-
-Test basic clone
-
-Output is flaky, save it in a file and check part independently
- $ hg --debug clone -U http://localhost:$HGPORT client-simple > clone-output
-
- $ cat clone-output | grep -v "received frame"
- using http://localhost:$HGPORT/
- sending capabilities command
- query 1; heads
- sending 2 commands
- sending command heads: {}
- sending command known: {
- 'nodes': []
- }
- sending 1 commands
- sending command changesetdata: {
- 'fields': set([
- 'bookmarks',
- 'parents',
- 'phase',
- 'revision'
- ]),
- 'revisions': [
- {
- 'heads': [
- '\xca\xa2\xa4eE\x1d\xd1\xfa\xcd\xa0\xf5\xb1#\x12\xc3UXA\x88\xa1',
- '\xcd%4vk\xec\xe18\xc7\xc1\xaf\xdch%0/\x0fb\xd8\x1f'
- ],
- 'roots': [],
- 'type': 'changesetdagrange'
- }
- ]
- }
- add changeset 3390ef850073
- add changeset 4432d83626e8
- add changeset cd2534766bec
- add changeset e96ae20f4188
- add changeset caa2a465451d
- checking for updated bookmarks
- sending 1 commands
- sending command manifestdata: {
- 'fields': set([
- 'parents',
- 'revision'
- ]),
- 'haveparents': True,
- 'nodes': [
- '\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A',
- '\xa9\x88\xfbCX>\x87\x1d\x1e\xd5u\x0e\xe0t\xc6\xd8@\xbb\xbf\xc8',
- '\xec\x80NH\x8c \x88\xc25\t\x9a\x10 u\x13\xbe\xcd\xc3\xdd\xa5',
- '\x04\\\x7f9\'\xda\x13\xe7Z\xf8\xf0\xe4\xf0HI\xe4a\xa9x\x0f',
- '7\x9c\xb0\xc2\xe6d\\y\xdd\xc5\x9a\x1dG\'\xa9\xfb\x83\n\xeb&'
- ],
- 'tree': ''
- }
- sending 1 commands
- sending command filesdata: {
- 'fields': set([
- 'parents',
- 'revision'
- ]),
- 'haveparents': True,
- 'revisions': [
- {
- 'nodes': [
- '3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:',
- 'D2\xd86&\xe8\xa9\x86U\xf0b\xec\x1f*C\xb0\x7f\x7f\xbb\xb0',
- '\xcd%4vk\xec\xe18\xc7\xc1\xaf\xdch%0/\x0fb\xd8\x1f',
- '\xe9j\xe2\x0fA\x88H{\x9a\xe4\xef9A\xc2|\x81\x141F\xe5',
- '\xca\xa2\xa4eE\x1d\xd1\xfa\xcd\xa0\xf5\xb1#\x12\xc3UXA\x88\xa1'
- ],
- 'type': 'changesetexplicit'
- }
- ]
- }
- updating the branch cache
- new changesets 3390ef850073:caa2a465451d (3 drafts)
- updating the branch cache
- (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob)
-
- $ cat clone-output | grep "received frame"
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=43; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
- received frame(size=11; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=1; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos)
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=941; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=992; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=901; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
-
- $ rm clone-output
-
-All changesets should have been transferred
-
- $ hg -R client-simple debugindex -c
- rev linkrev nodeid p1 p2
- 0 0 3390ef850073 000000000000 000000000000
- 1 1 4432d83626e8 3390ef850073 000000000000
- 2 2 cd2534766bec 4432d83626e8 000000000000
- 3 3 e96ae20f4188 3390ef850073 000000000000
- 4 4 caa2a465451d e96ae20f4188 000000000000
-
- $ hg -R client-simple log -G -T '{rev} {node} {phase}\n'
- o 4 caa2a465451dd1facda0f5b12312c355584188a1 draft
- |
- o 3 e96ae20f4188487b9ae4ef3941c27c81143146e5 draft
- |
- | o 2 cd2534766bece138c7c1afdc6825302f0f62d81f draft
- | |
- | o 1 4432d83626e8a98655f062ec1f2a43b07f7fbbb0 public
- |/
- o 0 3390ef850073fbc2f0dfff2244342c8e9229013a public
-
-
-All manifests should have been transferred
-
- $ hg -R client-simple debugindex -m
- rev linkrev nodeid p1 p2
- 0 0 992f4779029a 000000000000 000000000000
- 1 1 a988fb43583e 992f4779029a 000000000000
- 2 2 ec804e488c20 a988fb43583e 000000000000
- 3 3 045c7f3927da 992f4779029a 000000000000
- 4 4 379cb0c2e664 045c7f3927da 000000000000
-
-Cloning only a specific revision works
-
-Output is flaky, save it in a file and check part independently
- $ hg --debug clone -U -r 4432d83626e8 http://localhost:$HGPORT client-singlehead > clone-output
-
- $ cat clone-output | grep -v "received frame"
- using http://localhost:$HGPORT/
- sending capabilities command
- sending 1 commands
- sending command lookup: {
- 'key': '4432d83626e8'
- }
- query 1; heads
- sending 2 commands
- sending command heads: {}
- sending command known: {
- 'nodes': []
- }
- sending 1 commands
- sending command changesetdata: {
- 'fields': set([
- 'bookmarks',
- 'parents',
- 'phase',
- 'revision'
- ]),
- 'revisions': [
- {
- 'heads': [
- 'D2\xd86&\xe8\xa9\x86U\xf0b\xec\x1f*C\xb0\x7f\x7f\xbb\xb0'
- ],
- 'roots': [],
- 'type': 'changesetdagrange'
- }
- ]
- }
- add changeset 3390ef850073
- add changeset 4432d83626e8
- checking for updated bookmarks
- sending 1 commands
- sending command manifestdata: {
- 'fields': set([
- 'parents',
- 'revision'
- ]),
- 'haveparents': True,
- 'nodes': [
- '\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A',
- '\xa9\x88\xfbCX>\x87\x1d\x1e\xd5u\x0e\xe0t\xc6\xd8@\xbb\xbf\xc8'
- ],
- 'tree': ''
- }
- sending 1 commands
- sending command filesdata: {
- 'fields': set([
- 'parents',
- 'revision'
- ]),
- 'haveparents': True,
- 'revisions': [
- {
- 'nodes': [
- '3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:',
- 'D2\xd86&\xe8\xa9\x86U\xf0b\xec\x1f*C\xb0\x7f\x7f\xbb\xb0'
- ],
- 'type': 'changesetexplicit'
- }
- ]
- }
- updating the branch cache
- new changesets 3390ef850073:4432d83626e8
- updating the branch cache
- (sent 6 HTTP requests and * bytes; received * bytes in responses) (glob)
-
- $ cat clone-output | grep "received frame"
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=21; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=43; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
- received frame(size=11; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=1; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos)
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=381; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=404; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=439; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
-
- $ rm clone-output
-
- $ cd client-singlehead
-
- $ hg log -G -T '{rev} {node} {phase}\n'
- o 1 4432d83626e8a98655f062ec1f2a43b07f7fbbb0 public
- |
- o 0 3390ef850073fbc2f0dfff2244342c8e9229013a public
-
-
- $ hg debugindex -m
- rev linkrev nodeid p1 p2
- 0 0 992f4779029a 000000000000 000000000000
- 1 1 a988fb43583e 992f4779029a 000000000000
-
-Incremental pull works
-
-Output is flaky, save it in a file and check part independently
- $ hg --debug pull > pull-output
-
- $ cat pull-output | grep -v "received frame"
- pulling from http://localhost:$HGPORT/
- using http://localhost:$HGPORT/
- sending capabilities command
- query 1; heads
- sending 2 commands
- sending command heads: {}
- sending command known: {
- 'nodes': [
- 'D2\xd86&\xe8\xa9\x86U\xf0b\xec\x1f*C\xb0\x7f\x7f\xbb\xb0'
- ]
- }
- searching for changes
- all local changesets known remotely
- sending 1 commands
- sending command changesetdata: {
- 'fields': set([
- 'bookmarks',
- 'parents',
- 'phase',
- 'revision'
- ]),
- 'revisions': [
- {
- 'heads': [
- '\xca\xa2\xa4eE\x1d\xd1\xfa\xcd\xa0\xf5\xb1#\x12\xc3UXA\x88\xa1',
- '\xcd%4vk\xec\xe18\xc7\xc1\xaf\xdch%0/\x0fb\xd8\x1f'
- ],
- 'roots': [
- 'D2\xd86&\xe8\xa9\x86U\xf0b\xec\x1f*C\xb0\x7f\x7f\xbb\xb0'
- ],
- 'type': 'changesetdagrange'
- }
- ]
- }
- add changeset cd2534766bec
- add changeset e96ae20f4188
- add changeset caa2a465451d
- checking for updated bookmarks
- sending 1 commands
- sending command manifestdata: {
- 'fields': set([
- 'parents',
- 'revision'
- ]),
- 'haveparents': True,
- 'nodes': [
- '\xec\x80NH\x8c \x88\xc25\t\x9a\x10 u\x13\xbe\xcd\xc3\xdd\xa5',
- '\x04\\\x7f9\'\xda\x13\xe7Z\xf8\xf0\xe4\xf0HI\xe4a\xa9x\x0f',
- '7\x9c\xb0\xc2\xe6d\\y\xdd\xc5\x9a\x1dG\'\xa9\xfb\x83\n\xeb&'
- ],
- 'tree': ''
- }
- sending 1 commands
- sending command filesdata: {
- 'fields': set([
- 'parents',
- 'revision'
- ]),
- 'haveparents': True,
- 'revisions': [
- {
- 'nodes': [
- '\xcd%4vk\xec\xe18\xc7\xc1\xaf\xdch%0/\x0fb\xd8\x1f',
- '\xe9j\xe2\x0fA\x88H{\x9a\xe4\xef9A\xc2|\x81\x141F\xe5',
- '\xca\xa2\xa4eE\x1d\xd1\xfa\xcd\xa0\xf5\xb1#\x12\xc3UXA\x88\xa1'
- ],
- 'type': 'changesetexplicit'
- }
- ]
- }
- updating the branch cache
- new changesets cd2534766bec:caa2a465451d (3 drafts)
- (run 'hg update' to get a working copy)
- (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob)
-
- $ cat pull-output | grep "received frame"
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=43; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
- received frame(size=11; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=2; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos)
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=573; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=601; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=527; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
-
- $ rm pull-output
-
- $ hg log -G -T '{rev} {node} {phase}\n'
- o 4 caa2a465451dd1facda0f5b12312c355584188a1 draft
- |
- o 3 e96ae20f4188487b9ae4ef3941c27c81143146e5 draft
- |
- | o 2 cd2534766bece138c7c1afdc6825302f0f62d81f draft
- | |
- | o 1 4432d83626e8a98655f062ec1f2a43b07f7fbbb0 public
- |/
- o 0 3390ef850073fbc2f0dfff2244342c8e9229013a public
-
-
- $ hg debugindex -m
- rev linkrev nodeid p1 p2
- 0 0 992f4779029a 000000000000 000000000000
- 1 1 a988fb43583e 992f4779029a 000000000000
- 2 2 ec804e488c20 a988fb43583e 000000000000
- 3 3 045c7f3927da 992f4779029a 000000000000
- 4 4 379cb0c2e664 045c7f3927da 000000000000
-
-Phase-only update works
-TODO this doesn't work
-
- $ hg -R ../server-simple phase --public -r caa2a465451dd
- $ hg --debug pull
- pulling from http://localhost:$HGPORT/
- using http://localhost:$HGPORT/
- sending capabilities command
- query 1; heads
- sending 2 commands
- sending command heads: {}
- sending command known: {
- 'nodes': [
- '\xcd%4vk\xec\xe18\xc7\xc1\xaf\xdch%0/\x0fb\xd8\x1f',
- '\xca\xa2\xa4eE\x1d\xd1\xfa\xcd\xa0\xf5\xb1#\x12\xc3UXA\x88\xa1'
- ]
- }
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=43; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
- received frame(size=11; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=3; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos)
- searching for changes
- all remote heads known locally
- sending 1 commands
- sending command changesetdata: {
- 'fields': set([
- 'bookmarks',
- 'parents',
- 'phase',
- 'revision'
- ]),
- 'revisions': [
- {
- 'heads': [
- '\xca\xa2\xa4eE\x1d\xd1\xfa\xcd\xa0\xf5\xb1#\x12\xc3UXA\x88\xa1',
- '\xcd%4vk\xec\xe18\xc7\xc1\xaf\xdch%0/\x0fb\xd8\x1f'
- ],
- 'roots': [
- '\xca\xa2\xa4eE\x1d\xd1\xfa\xcd\xa0\xf5\xb1#\x12\xc3UXA\x88\xa1',
- '\xcd%4vk\xec\xe18\xc7\xc1\xaf\xdch%0/\x0fb\xd8\x1f'
- ],
- 'type': 'changesetdagrange'
- }
- ]
- }
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=13; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
- checking for updated bookmarks
- (run 'hg update' to get a working copy)
- (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
-
- $ hg log -G -T '{rev} {node} {phase}\n'
- o 4 caa2a465451dd1facda0f5b12312c355584188a1 draft
- |
- o 3 e96ae20f4188487b9ae4ef3941c27c81143146e5 draft
- |
- | o 2 cd2534766bece138c7c1afdc6825302f0f62d81f draft
- | |
- | o 1 4432d83626e8a98655f062ec1f2a43b07f7fbbb0 public
- |/
- o 0 3390ef850073fbc2f0dfff2244342c8e9229013a public
-
-
- $ cd ..
-
-Bookmarks are transferred on clone
-
- $ hg -R server-simple bookmark -r 3390ef850073fbc2f0dfff2244342c8e9229013a book-1
- $ hg -R server-simple bookmark -r cd2534766bece138c7c1afdc6825302f0f62d81f book-2
-
-Output is flaky, save it in a file and check part independently
- $ hg --debug clone -U http://localhost:$HGPORT/ client-bookmarks > clone-output
-
- $ cat clone-output | grep -v "received frame"
- using http://localhost:$HGPORT/
- sending capabilities command
- query 1; heads
- sending 2 commands
- sending command heads: {}
- sending command known: {
- 'nodes': []
- }
- sending 1 commands
- sending command changesetdata: {
- 'fields': set([
- 'bookmarks',
- 'parents',
- 'phase',
- 'revision'
- ]),
- 'revisions': [
- {
- 'heads': [
- '\xca\xa2\xa4eE\x1d\xd1\xfa\xcd\xa0\xf5\xb1#\x12\xc3UXA\x88\xa1',
- '\xcd%4vk\xec\xe18\xc7\xc1\xaf\xdch%0/\x0fb\xd8\x1f'
- ],
- 'roots': [],
- 'type': 'changesetdagrange'
- }
- ]
- }
- add changeset 3390ef850073
- add changeset 4432d83626e8
- add changeset cd2534766bec
- add changeset e96ae20f4188
- add changeset caa2a465451d
- checking for updated bookmarks
- adding remote bookmark book-1
- adding remote bookmark book-2
- sending 1 commands
- sending command manifestdata: {
- 'fields': set([
- 'parents',
- 'revision'
- ]),
- 'haveparents': True,
- 'nodes': [
- '\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A',
- '\xa9\x88\xfbCX>\x87\x1d\x1e\xd5u\x0e\xe0t\xc6\xd8@\xbb\xbf\xc8',
- '\xec\x80NH\x8c \x88\xc25\t\x9a\x10 u\x13\xbe\xcd\xc3\xdd\xa5',
- '\x04\\\x7f9\'\xda\x13\xe7Z\xf8\xf0\xe4\xf0HI\xe4a\xa9x\x0f',
- '7\x9c\xb0\xc2\xe6d\\y\xdd\xc5\x9a\x1dG\'\xa9\xfb\x83\n\xeb&'
- ],
- 'tree': ''
- }
- sending 1 commands
- sending command filesdata: {
- 'fields': set([
- 'parents',
- 'revision'
- ]),
- 'haveparents': True,
- 'revisions': [
- {
- 'nodes': [
- '3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:',
- 'D2\xd86&\xe8\xa9\x86U\xf0b\xec\x1f*C\xb0\x7f\x7f\xbb\xb0',
- '\xcd%4vk\xec\xe18\xc7\xc1\xaf\xdch%0/\x0fb\xd8\x1f',
- '\xe9j\xe2\x0fA\x88H{\x9a\xe4\xef9A\xc2|\x81\x141F\xe5',
- '\xca\xa2\xa4eE\x1d\xd1\xfa\xcd\xa0\xf5\xb1#\x12\xc3UXA\x88\xa1'
- ],
- 'type': 'changesetexplicit'
- }
- ]
- }
- updating the branch cache
- new changesets 3390ef850073:caa2a465451d (1 drafts)
- updating the branch cache
- (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob)
-
- $ cat clone-output | grep "received frame"
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=43; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
- received frame(size=11; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=1; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos)
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=979; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=992; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=901; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
-
- $ rm clone-output
-
- $ hg -R client-bookmarks bookmarks
- book-1 0:3390ef850073
- book-2 2:cd2534766bec
-
-Server-side bookmark moves are reflected during `hg pull`
-
- $ hg -R server-simple bookmark -r cd2534766bece138c7c1afdc6825302f0f62d81f book-1
- moving bookmark 'book-1' forward from 3390ef850073
-
-Output is flaky, save it in a file and check part independently
- $ hg -R client-bookmarks --debug pull > pull-output
-
- $ cat pull-output | grep -v "received frame"
- pulling from http://localhost:$HGPORT/
- using http://localhost:$HGPORT/
- sending capabilities command
- query 1; heads
- sending 2 commands
- sending command heads: {}
- sending command known: {
- 'nodes': [
- '\xcd%4vk\xec\xe18\xc7\xc1\xaf\xdch%0/\x0fb\xd8\x1f',
- '\xca\xa2\xa4eE\x1d\xd1\xfa\xcd\xa0\xf5\xb1#\x12\xc3UXA\x88\xa1'
- ]
- }
- searching for changes
- all remote heads known locally
- sending 1 commands
- sending command changesetdata: {
- 'fields': set([
- 'bookmarks',
- 'parents',
- 'phase',
- 'revision'
- ]),
- 'revisions': [
- {
- 'heads': [
- '\xca\xa2\xa4eE\x1d\xd1\xfa\xcd\xa0\xf5\xb1#\x12\xc3UXA\x88\xa1',
- '\xcd%4vk\xec\xe18\xc7\xc1\xaf\xdch%0/\x0fb\xd8\x1f'
- ],
- 'roots': [
- '\xca\xa2\xa4eE\x1d\xd1\xfa\xcd\xa0\xf5\xb1#\x12\xc3UXA\x88\xa1',
- '\xcd%4vk\xec\xe18\xc7\xc1\xaf\xdch%0/\x0fb\xd8\x1f'
- ],
- 'type': 'changesetdagrange'
- }
- ]
- }
- checking for updated bookmarks
- updating bookmark book-1
- (run 'hg update' to get a working copy)
- (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
-
- $ cat pull-output | grep "received frame"
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=43; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
- received frame(size=11; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=3; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos)
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=65; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
-
- $ rm pull-output
-
- $ hg -R client-bookmarks bookmarks
- book-1 2:cd2534766bec
- book-2 2:cd2534766bec
-
- $ killdaemons.py
-
-Let's set up a slightly more complicated server
-
- $ hg init server-2
- $ enablehttpv2 server-2
- $ cd server-2
- $ mkdir dir0 dir1
- $ echo a0 > a
- $ echo b0 > b
- $ hg -q commit -A -m 'commit 0'
- $ echo c0 > dir0/c
- $ echo d0 > dir0/d
- $ hg -q commit -A -m 'commit 1'
- $ echo e0 > dir1/e
- $ echo f0 > dir1/f
- $ hg -q commit -A -m 'commit 2'
- $ echo c1 > dir0/c
- $ echo e1 > dir1/e
- $ hg commit -m 'commit 3'
- $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log
- $ cat hg.pid > $DAEMON_PIDS
-
- $ cd ..
-
-Narrow clone only fetches some files
-
-Output is flaky, save it in a file and check part independently
- $ hg --config extensions.pullext=$TESTDIR/pullext.py --debug clone -U --include dir0/ http://localhost:$HGPORT/ client-narrow-0 > clone-output
-
- $ cat clone-output | grep -v "received frame"
- using http://localhost:$HGPORT/
- sending capabilities command
- query 1; heads
- sending 2 commands
- sending command heads: {}
- sending command known: {
- 'nodes': []
- }
- sending 1 commands
- sending command changesetdata: {
- 'fields': set([
- 'bookmarks',
- 'parents',
- 'phase',
- 'revision'
- ]),
- 'revisions': [
- {
- 'heads': [
- '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.'
- ],
- 'roots': [],
- 'type': 'changesetdagrange'
- }
- ]
- }
- add changeset 3390ef850073
- add changeset b709380892b1
- add changeset 47fe012ab237
- add changeset 97765fc3cd62
- checking for updated bookmarks
- sending 1 commands
- sending command manifestdata: {
- 'fields': set([
- 'parents',
- 'revision'
- ]),
- 'haveparents': True,
- 'nodes': [
- '\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A',
- '|2 \x1a\xa3\xa1R\xa9\xe6\xa9"+?\xa8\xd0\xe3\x0f\xc2V\xe8',
- '\x8d\xd0W<\x7f\xaf\xe2\x04F\xcc\xea\xac\x05N\xea\xa4x\x91M\xdb',
- '113\x85\xf2!\x8b\x08^\xb2Z\x821\x1e*\xdd\x0e\xeb\x8c3'
- ],
- 'tree': ''
- }
- sending 1 commands
- sending command filesdata: {
- 'fields': set([
- 'parents',
- 'revision'
- ]),
- 'haveparents': True,
- 'pathfilter': {
- 'include': [
- 'path:dir0'
- ]
- },
- 'revisions': [
- {
- 'nodes': [
- '3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:',
- '\xb7\t8\x08\x92\xb1\x93\xc1\t\x1d:\x81\x7fp`R\xe3F\x82\x1b',
- 'G\xfe\x01*\xb27\xa8\xc7\xfc\x0cx\xf9\xf2mXf\xee\xf3\xf8%',
- '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.'
- ],
- 'type': 'changesetexplicit'
- }
- ]
- }
- updating the branch cache
- new changesets 3390ef850073:97765fc3cd62
- updating the branch cache
- (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob)
-
- $ cat clone-output | grep "received frame"
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=22; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
- received frame(size=11; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=1; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos)
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=783; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=967; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=449; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
-
- $ rm clone-output
-
-#if reporevlogstore
- $ find client-narrow-0/.hg/store -type f -name '*.i' | sort
- client-narrow-0/.hg/store/00changelog.i
- client-narrow-0/.hg/store/00manifest.i
- client-narrow-0/.hg/store/data/dir0/c.i
- client-narrow-0/.hg/store/data/dir0/d.i
-#endif
-
---exclude by itself works
-
-Output is flaky, save it in a file and check part independently
- $ hg --config extensions.pullext=$TESTDIR/pullext.py --debug clone -U --exclude dir0/ http://localhost:$HGPORT/ client-narrow-1 > clone-output
-
- $ cat clone-output | grep -v "received frame"
- using http://localhost:$HGPORT/
- sending capabilities command
- query 1; heads
- sending 2 commands
- sending command heads: {}
- sending command known: {
- 'nodes': []
- }
- sending 1 commands
- sending command changesetdata: {
- 'fields': set([
- 'bookmarks',
- 'parents',
- 'phase',
- 'revision'
- ]),
- 'revisions': [
- {
- 'heads': [
- '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.'
- ],
- 'roots': [],
- 'type': 'changesetdagrange'
- }
- ]
- }
- add changeset 3390ef850073
- add changeset b709380892b1
- add changeset 47fe012ab237
- add changeset 97765fc3cd62
- checking for updated bookmarks
- sending 1 commands
- sending command manifestdata: {
- 'fields': set([
- 'parents',
- 'revision'
- ]),
- 'haveparents': True,
- 'nodes': [
- '\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A',
- '|2 \x1a\xa3\xa1R\xa9\xe6\xa9"+?\xa8\xd0\xe3\x0f\xc2V\xe8',
- '\x8d\xd0W<\x7f\xaf\xe2\x04F\xcc\xea\xac\x05N\xea\xa4x\x91M\xdb',
- '113\x85\xf2!\x8b\x08^\xb2Z\x821\x1e*\xdd\x0e\xeb\x8c3'
- ],
- 'tree': ''
- }
- sending 1 commands
- sending command filesdata: {
- 'fields': set([
- 'parents',
- 'revision'
- ]),
- 'haveparents': True,
- 'pathfilter': {
- 'exclude': [
- 'path:dir0'
- ],
- 'include': [
- 'path:.'
- ]
- },
- 'revisions': [
- {
- 'nodes': [
- '3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:',
- '\xb7\t8\x08\x92\xb1\x93\xc1\t\x1d:\x81\x7fp`R\xe3F\x82\x1b',
- 'G\xfe\x01*\xb27\xa8\xc7\xfc\x0cx\xf9\xf2mXf\xee\xf3\xf8%',
- '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.'
- ],
- 'type': 'changesetexplicit'
- }
- ]
- }
- updating the branch cache
- new changesets 3390ef850073:97765fc3cd62
- updating the branch cache
- (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob)
-
- $ cat clone-output | grep "received frame"
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=22; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
- received frame(size=11; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=1; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos)
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=783; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=967; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=709; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
-
- $ rm clone-output
-
-#if reporevlogstore
- $ find client-narrow-1/.hg/store -type f -name '*.i' | sort
- client-narrow-1/.hg/store/00changelog.i
- client-narrow-1/.hg/store/00manifest.i
- client-narrow-1/.hg/store/data/a.i
- client-narrow-1/.hg/store/data/b.i
- client-narrow-1/.hg/store/data/dir1/e.i
- client-narrow-1/.hg/store/data/dir1/f.i
-#endif
-
-Mixing --include and --exclude works
-
-Output is flaky, save it in a file and check part independently
- $ hg --config extensions.pullext=$TESTDIR/pullext.py --debug clone -U --include dir0/ --exclude dir0/c http://localhost:$HGPORT/ client-narrow-2 > clone-output
-
- $ cat clone-output | grep -v "received frame"
- using http://localhost:$HGPORT/
- sending capabilities command
- query 1; heads
- sending 2 commands
- sending command heads: {}
- sending command known: {
- 'nodes': []
- }
- sending 1 commands
- sending command changesetdata: {
- 'fields': set([
- 'bookmarks',
- 'parents',
- 'phase',
- 'revision'
- ]),
- 'revisions': [
- {
- 'heads': [
- '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.'
- ],
- 'roots': [],
- 'type': 'changesetdagrange'
- }
- ]
- }
- add changeset 3390ef850073
- add changeset b709380892b1
- add changeset 47fe012ab237
- add changeset 97765fc3cd62
- checking for updated bookmarks
- sending 1 commands
- sending command manifestdata: {
- 'fields': set([
- 'parents',
- 'revision'
- ]),
- 'haveparents': True,
- 'nodes': [
- '\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A',
- '|2 \x1a\xa3\xa1R\xa9\xe6\xa9"+?\xa8\xd0\xe3\x0f\xc2V\xe8',
- '\x8d\xd0W<\x7f\xaf\xe2\x04F\xcc\xea\xac\x05N\xea\xa4x\x91M\xdb',
- '113\x85\xf2!\x8b\x08^\xb2Z\x821\x1e*\xdd\x0e\xeb\x8c3'
- ],
- 'tree': ''
- }
- sending 1 commands
- sending command filesdata: {
- 'fields': set([
- 'parents',
- 'revision'
- ]),
- 'haveparents': True,
- 'pathfilter': {
- 'exclude': [
- 'path:dir0/c'
- ],
- 'include': [
- 'path:dir0'
- ]
- },
- 'revisions': [
- {
- 'nodes': [
- '3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:',
- '\xb7\t8\x08\x92\xb1\x93\xc1\t\x1d:\x81\x7fp`R\xe3F\x82\x1b',
- 'G\xfe\x01*\xb27\xa8\xc7\xfc\x0cx\xf9\xf2mXf\xee\xf3\xf8%',
- '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.'
- ],
- 'type': 'changesetexplicit'
- }
- ]
- }
- updating the branch cache
- new changesets 3390ef850073:97765fc3cd62
- updating the branch cache
- (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob)
-
- $ cat clone-output | grep "received frame"
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=22; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
- received frame(size=11; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=1; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos)
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=783; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=967; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=160; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
-
- $ rm clone-output
-
-#if reporevlogstore
- $ find client-narrow-2/.hg/store -type f -name '*.i' | sort
- client-narrow-2/.hg/store/00changelog.i
- client-narrow-2/.hg/store/00manifest.i
- client-narrow-2/.hg/store/data/dir0/d.i
-#endif
-
---stream will use rawfiledata to transfer changelog and manifestlog, then
-fall through to get files data
-
-Output is flaky, save it in a file and check part independently
- $ hg --debug clone --stream -U http://localhost:$HGPORT client-stream-0 > clone-output
-
- $ cat clone-output | grep -v "received frame"
- using http://localhost:$HGPORT/
- sending capabilities command
- sending 1 commands
- sending command rawstorefiledata: {
- 'files': [
- 'changelog',
- 'manifestlog'
- ]
- }
- updating the branch cache
- query 1; heads
- sending 2 commands
- sending command heads: {}
- sending command known: {
- 'nodes': [
- '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.'
- ]
- }
- searching for changes
- all remote heads known locally
- sending 1 commands
- sending command changesetdata: {
- 'fields': set([
- 'bookmarks',
- 'parents',
- 'phase',
- 'revision'
- ]),
- 'revisions': [
- {
- 'heads': [
- '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.'
- ],
- 'roots': [
- '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.'
- ],
- 'type': 'changesetdagrange'
- }
- ]
- }
- checking for updated bookmarks
- sending 1 commands
- sending command filesdata: {
- 'fields': set([
- 'parents',
- 'revision'
- ]),
- 'haveparents': True,
- 'revisions': [
- {
- 'nodes': [
- '3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:',
- '\xb7\t8\x08\x92\xb1\x93\xc1\t\x1d:\x81\x7fp`R\xe3F\x82\x1b',
- 'G\xfe\x01*\xb27\xa8\xc7\xfc\x0cx\xf9\xf2mXf\xee\xf3\xf8%',
- '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.'
- ],
- 'type': 'changesetexplicit'
- }
- ]
- }
- updating the branch cache
- (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob)
-
- $ cat clone-output | grep "received frame"
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=1275; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) (no-zstd !)
- received frame(size=1283; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) (zstd !)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=22; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
- received frame(size=11; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=2; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos)
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=13; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=1133; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
-
- $ rm clone-output
-
---stream + --include/--exclude will only obtain some files
-
-Output is flaky, save it in a file and check part independently
- $ hg --debug --config extensions.pullext=$TESTDIR/pullext.py clone --stream --include dir0/ -U http://localhost:$HGPORT client-stream-2 > clone-output
-
- $ cat clone-output | grep -v "received frame"
- using http://localhost:$HGPORT/
- sending capabilities command
- sending 1 commands
- sending command rawstorefiledata: {
- 'files': [
- 'changelog',
- 'manifestlog'
- ]
- }
- updating the branch cache
- query 1; heads
- sending 2 commands
- sending command heads: {}
- sending command known: {
- 'nodes': [
- '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.'
- ]
- }
- searching for changes
- all remote heads known locally
- sending 1 commands
- sending command changesetdata: {
- 'fields': set([
- 'bookmarks',
- 'parents',
- 'phase',
- 'revision'
- ]),
- 'revisions': [
- {
- 'heads': [
- '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.'
- ],
- 'roots': [
- '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.'
- ],
- 'type': 'changesetdagrange'
- }
- ]
- }
- checking for updated bookmarks
- sending 1 commands
- sending command filesdata: {
- 'fields': set([
- 'parents',
- 'revision'
- ]),
- 'haveparents': True,
- 'pathfilter': {
- 'include': [
- 'path:dir0'
- ]
- },
- 'revisions': [
- {
- 'nodes': [
- '3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:',
- '\xb7\t8\x08\x92\xb1\x93\xc1\t\x1d:\x81\x7fp`R\xe3F\x82\x1b',
- 'G\xfe\x01*\xb27\xa8\xc7\xfc\x0cx\xf9\xf2mXf\xee\xf3\xf8%',
- '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.'
- ],
- 'type': 'changesetexplicit'
- }
- ]
- }
- updating the branch cache
- (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob)
-
- $ cat clone-output | grep "received frame"
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=1275; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) (no-zstd !)
- received frame(size=1283; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation) (zstd !)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=22; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
- received frame(size=11; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=2; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos)
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=13; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=449; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
-
- $ rm clone-output
-
-#if reporevlogstore
- $ find client-stream-2/.hg/store -type f -name '*.i' | sort
- client-stream-2/.hg/store/00changelog.i
- client-stream-2/.hg/store/00manifest.i
- client-stream-2/.hg/store/data/dir0/c.i
- client-stream-2/.hg/store/data/dir0/d.i
-#endif
-
-Shallow clone doesn't work with revlogs
-
-Output is flaky, save it in a file and check part independently
- $ hg --debug --config extensions.pullext=$TESTDIR/pullext.py clone --depth 1 -U http://localhost:$HGPORT client-shallow-revlogs > clone-output
- transaction abort!
- rollback completed
- abort: revlog storage does not support missing parents write mode
- [255]
-
- $ cat clone-output | grep -v "received frame"
- using http://localhost:$HGPORT/
- sending capabilities command
- query 1; heads
- sending 2 commands
- sending command heads: {}
- sending command known: {
- 'nodes': []
- }
- sending 1 commands
- sending command changesetdata: {
- 'fields': set([
- 'bookmarks',
- 'parents',
- 'phase',
- 'revision'
- ]),
- 'revisions': [
- {
- 'heads': [
- '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.'
- ],
- 'roots': [],
- 'type': 'changesetdagrange'
- }
- ]
- }
- add changeset 3390ef850073
- add changeset b709380892b1
- add changeset 47fe012ab237
- add changeset 97765fc3cd62
- checking for updated bookmarks
- sending 1 commands
- sending command manifestdata: {
- 'fields': set([
- 'parents',
- 'revision'
- ]),
- 'haveparents': True,
- 'nodes': [
- '\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A',
- '|2 \x1a\xa3\xa1R\xa9\xe6\xa9"+?\xa8\xd0\xe3\x0f\xc2V\xe8',
- '\x8d\xd0W<\x7f\xaf\xe2\x04F\xcc\xea\xac\x05N\xea\xa4x\x91M\xdb',
- '113\x85\xf2!\x8b\x08^\xb2Z\x821\x1e*\xdd\x0e\xeb\x8c3'
- ],
- 'tree': ''
- }
- sending 1 commands
- sending command filesdata: {
- 'fields': set([
- 'linknode',
- 'parents',
- 'revision'
- ]),
- 'haveparents': False,
- 'revisions': [
- {
- 'nodes': [
- '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.'
- ],
- 'type': 'changesetexplicit'
- }
- ]
- }
- (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob)
-
- $ cat clone-output | grep "received frame"
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=22; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
- received frame(size=11; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=1; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos)
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=783; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=967; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=1005; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
-
- $ rm clone-output
-
- $ killdaemons.py
-
-Repo with 2 DAG branches introducing same filenode, to test linknode adjustment
-
- $ hg init server-linknode
- $ enablehttpv2 server-linknode
- $ cd server-linknode
- $ touch foo
- $ hg -q commit -Am initial
- $ echo foo > dupe-file
- $ hg commit -Am 'dupe 1'
- adding dupe-file
- $ hg -q up -r 0
- $ echo foo > dupe-file
- $ hg commit -Am 'dupe 2'
- adding dupe-file
- created new head
- $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log
- $ cat hg.pid > $DAEMON_PIDS
- $ cd ..
-
-Perform an incremental pull of both heads and ensure linkrev is written out properly
-
- $ hg clone -r 96ee1d7354c4 http://localhost:$HGPORT client-linknode-1
- new changesets 96ee1d7354c4
- updating to branch default
- 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
- $ cd client-linknode-1
- $ touch extra
- $ hg commit -Am extra
- adding extra
- $ cd ..
-
- $ hg clone -r 96ee1d7354c4 http://localhost:$HGPORT client-linknode-2
- new changesets 96ee1d7354c4
- updating to branch default
- 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
- $ cd client-linknode-2
- $ touch extra
- $ hg commit -Am extra
- adding extra
- $ cd ..
-
- $ hg -R client-linknode-1 pull -r 1681c33f9f80
- pulling from http://localhost:$HGPORT/
- searching for changes
- new changesets 1681c33f9f80
- (run 'hg update' to get a working copy)
-
-#if reporevlogstore
- $ hg -R client-linknode-1 debugrevlogindex dupe-file
- rev linkrev nodeid p1 p2
- 0 2 2ed2a3912a0b 000000000000 000000000000
-#endif
-
- $ hg -R client-linknode-2 pull -r 639c8990d6a5
- pulling from http://localhost:$HGPORT/
- searching for changes
- new changesets 639c8990d6a5
- (run 'hg update' to get a working copy)
-
-#if reporevlogstore
- $ hg -R client-linknode-2 debugrevlogindex dupe-file
- rev linkrev nodeid p1 p2
- 0 2 2ed2a3912a0b 000000000000 000000000000
-#endif
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/testlib/badserverext.py Fri Feb 18 14:27:43 2022 +0100
@@ -0,0 +1,462 @@
+# badserverext.py - Extension making servers behave badly
+#
+# Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+# no-check-code
+
+"""Extension to make servers behave badly.
+
+This extension is useful for testing Mercurial behavior when various network
+events occur.
+
+Various config options in the [badserver] section influence behavior:
+
+close-before-accept
+ If true, close() the server socket when a new connection arrives before
+ accept() is called. The server will then exit.
+
+close-after-accept
+ If true, the server will close() the client socket immediately after
+ accept().
+
+close-after-recv-bytes
+ If defined, close the client socket after receiving this many bytes.
+ (The value is a list; multiple values can be used to close a series of
+ requests.)
+
+close-after-recv-patterns
+ If defined, the `close-after-recv-bytes` values only start counting after the
+ `read` operation that encountered the defined patterns.
+ (The value is a list; multiple values can be used to close a series of
+ requests.)
+
+close-after-send-bytes
+ If defined, close the client socket after sending this many bytes.
+ (The value is a list; multiple values can be used to close a series of
+ requests.)
+
+close-after-send-patterns
+ If defined, close the client socket after the configured regexp is seen.
+ (The value is a list; multiple values can be used to close a series of
+ requests.)
+"""
+
+from __future__ import absolute_import
+
+import re
+import socket
+
+from mercurial import (
+ pycompat,
+ registrar,
+)
+
+from mercurial.hgweb import server
+
+configtable = {}
+configitem = registrar.configitem(configtable)
+
+configitem(
+ b'badserver',
+ b'close-after-accept',
+ default=False,
+)
+configitem(
+ b'badserver',
+ b'close-after-recv-bytes',
+ default=b'0',
+)
+configitem(
+ b'badserver',
+ b'close-after-recv-patterns',
+ default=b'',
+)
+configitem(
+ b'badserver',
+ b'close-after-send-bytes',
+ default=b'0',
+)
+configitem(
+ b'badserver',
+ b'close-after-send-patterns',
+ default=b'',
+)
+configitem(
+ b'badserver',
+ b'close-before-accept',
+ default=False,
+)
+
+
+class ConditionTracker(object):
+ def __init__(
+ self,
+ close_after_recv_bytes,
+ close_after_recv_patterns,
+ close_after_send_bytes,
+ close_after_send_patterns,
+ ):
+ self._all_close_after_recv_bytes = close_after_recv_bytes
+ self._all_close_after_recv_patterns = close_after_recv_patterns
+ self._all_close_after_send_bytes = close_after_send_bytes
+ self._all_close_after_send_patterns = close_after_send_patterns
+
+ self.target_recv_bytes = None
+ self.remaining_recv_bytes = None
+ self.recv_patterns = None
+ self.recv_data = b''
+ self.target_send_bytes = None
+ self.remaining_send_bytes = None
+ self.send_pattern = None
+ self.send_data = b''
+
+ def start_next_request(self):
+ """move to the next set of close condition"""
+ if self._all_close_after_recv_bytes:
+ self.target_recv_bytes = self._all_close_after_recv_bytes.pop(0)
+ self.remaining_recv_bytes = self.target_recv_bytes
+ else:
+ self.target_recv_bytes = None
+ self.remaining_recv_bytes = None
+
+ self.recv_data = b''
+ if self._all_close_after_recv_patterns:
+ self.recv_pattern = self._all_close_after_recv_patterns.pop(0)
+ else:
+ self.recv_pattern = None
+
+ if self._all_close_after_send_bytes:
+ self.target_send_bytes = self._all_close_after_send_bytes.pop(0)
+ self.remaining_send_bytes = self.target_send_bytes
+ else:
+ self.target_send_bytes = None
+ self.remaining_send_bytes = None
+
+ self.send_data = b''
+ if self._all_close_after_send_patterns:
+ self.send_pattern = self._all_close_after_send_patterns.pop(0)
+ else:
+ self.send_pattern = None
+
+ def might_close(self):
+ """True, if any processing will be needed"""
+ if self.remaining_recv_bytes is not None:
+ return True
+ if self.recv_pattern is not None:
+ return True
+ if self.remaining_send_bytes is not None:
+ return True
+ if self.send_pattern is not None:
+ return True
+ return False
+
+ def forward_write(self, obj, method, data, *args, **kwargs):
+ """call an underlying write function until condition are met
+
+ When the condition are met the socket is closed
+ """
+ remaining = self.remaining_send_bytes
+ pattern = self.send_pattern
+
+ orig = object.__getattribute__(obj, '_orig')
+ bmethod = method.encode('ascii')
+ func = getattr(orig, method)
+
+ if pattern:
+ self.send_data += data
+ pieces = pattern.split(self.send_data, maxsplit=1)
+ if len(pieces) > 1:
+ dropped = len(pieces[-1])
+ remaining = len(data) - dropped
+
+ if remaining:
+ remaining = max(0, remaining)
+
+ if not remaining:
+ newdata = data
+ else:
+ if remaining < len(data):
+ newdata = data[0:remaining]
+ else:
+ newdata = data
+ remaining -= len(newdata)
+ self.remaining_send_bytes = remaining
+
+ result = func(newdata, *args, **kwargs)
+
+ if remaining is None:
+ obj._writelog(b'%s(%d) -> %s' % (bmethod, len(data), data))
+ else:
+ msg = b'%s(%d from %d) -> (%d) %s'
+ msg %= (bmethod, len(newdata), len(data), remaining, newdata)
+ obj._writelog(msg)
+
+ if remaining is not None and remaining <= 0:
+ obj._writelog(b'write limit reached; closing socket')
+ object.__getattribute__(obj, '_cond_close')()
+ raise Exception('connection closed after sending N bytes')
+
+ return result
+
+ def forward_read(self, obj, method, size=-1):
+ """call an underlying read function until condition are met
+
+ When the condition are met the socket is closed
+ """
+ remaining = self.remaining_recv_bytes
+ pattern = self.recv_pattern
+
+ orig = object.__getattribute__(obj, '_orig')
+ bmethod = method.encode('ascii')
+ func = getattr(orig, method)
+
+ requested_size = size
+ actual_size = size
+
+ if pattern is None and remaining:
+ if size < 0:
+ actual_size = remaining
+ else:
+ actual_size = min(remaining, requested_size)
+
+ result = func(actual_size)
+
+ if pattern is None and remaining:
+ remaining -= len(result)
+ self.remaining_recv_bytes = remaining
+
+ if requested_size == 65537:
+ requested_repr = b'~'
+ else:
+ requested_repr = b'%d' % requested_size
+ if requested_size == actual_size:
+ msg = b'%s(%s) -> (%d) %s'
+ msg %= (bmethod, requested_repr, len(result), result)
+ else:
+ msg = b'%s(%d from %s) -> (%d) %s'
+ msg %= (bmethod, actual_size, requested_repr, len(result), result)
+ obj._writelog(msg)
+
+ if pattern is not None:
+ self.recv_data += result
+ if pattern.search(self.recv_data):
+ # start counting bytes starting with the next read
+ self.recv_pattern = None
+
+ if remaining is not None and remaining <= 0:
+ obj._writelog(b'read limit reached; closing socket')
+ obj._cond_close()
+
+ # This is the easiest way to abort the current request.
+ raise Exception('connection closed after receiving N bytes')
+
+ return result
+
+
+# We can't adjust __class__ on a socket instance. So we define a proxy type.
+class socketproxy(object):
+ __slots__ = ('_orig', '_logfp', '_cond')
+
+ def __init__(self, obj, logfp, condition_tracked):
+ object.__setattr__(self, '_orig', obj)
+ object.__setattr__(self, '_logfp', logfp)
+ object.__setattr__(self, '_cond', condition_tracked)
+
+ def __getattribute__(self, name):
+ if name in ('makefile', 'sendall', '_writelog', '_cond_close'):
+ return object.__getattribute__(self, name)
+
+ return getattr(object.__getattribute__(self, '_orig'), name)
+
+ def __delattr__(self, name):
+ delattr(object.__getattribute__(self, '_orig'), name)
+
+ def __setattr__(self, name, value):
+ setattr(object.__getattribute__(self, '_orig'), name, value)
+
+ def _writelog(self, msg):
+ msg = msg.replace(b'\r', b'\\r').replace(b'\n', b'\\n')
+
+ object.__getattribute__(self, '_logfp').write(msg)
+ object.__getattribute__(self, '_logfp').write(b'\n')
+ object.__getattribute__(self, '_logfp').flush()
+
+ def makefile(self, mode, bufsize):
+ f = object.__getattribute__(self, '_orig').makefile(mode, bufsize)
+
+ logfp = object.__getattribute__(self, '_logfp')
+ cond = object.__getattribute__(self, '_cond')
+
+ return fileobjectproxy(f, logfp, cond)
+
+ def sendall(self, data, flags=0):
+ cond = object.__getattribute__(self, '_cond')
+ return cond.forward_write(self, 'sendall', data, flags)
+
+ def _cond_close(self):
+ object.__getattribute__(self, '_orig').shutdown(socket.SHUT_RDWR)
+
+
+# We can't adjust __class__ on socket._fileobject, so define a proxy.
+class fileobjectproxy(object):
+ __slots__ = ('_orig', '_logfp', '_cond')
+
+ def __init__(self, obj, logfp, condition_tracked):
+ object.__setattr__(self, '_orig', obj)
+ object.__setattr__(self, '_logfp', logfp)
+ object.__setattr__(self, '_cond', condition_tracked)
+
+ def __getattribute__(self, name):
+ if name in (
+ '_close',
+ 'read',
+ 'readline',
+ 'write',
+ '_writelog',
+ '_cond_close',
+ ):
+ return object.__getattribute__(self, name)
+
+ return getattr(object.__getattribute__(self, '_orig'), name)
+
+ def __delattr__(self, name):
+ delattr(object.__getattribute__(self, '_orig'), name)
+
+ def __setattr__(self, name, value):
+ setattr(object.__getattribute__(self, '_orig'), name, value)
+
+ def _writelog(self, msg):
+ msg = msg.replace(b'\r', b'\\r').replace(b'\n', b'\\n')
+
+ object.__getattribute__(self, '_logfp').write(msg)
+ object.__getattribute__(self, '_logfp').write(b'\n')
+ object.__getattribute__(self, '_logfp').flush()
+
+ def _close(self):
+ # Python 3 uses an io.BufferedIO instance. Python 2 uses some file
+ # object wrapper.
+ if pycompat.ispy3:
+ orig = object.__getattribute__(self, '_orig')
+
+ if hasattr(orig, 'raw'):
+ orig.raw._sock.shutdown(socket.SHUT_RDWR)
+ else:
+ self.close()
+ else:
+ self._sock.shutdown(socket.SHUT_RDWR)
+
+ def read(self, size=-1):
+ cond = object.__getattribute__(self, '_cond')
+ return cond.forward_read(self, 'read', size)
+
+ def readline(self, size=-1):
+ cond = object.__getattribute__(self, '_cond')
+ return cond.forward_read(self, 'readline', size)
+
+ def write(self, data):
+ cond = object.__getattribute__(self, '_cond')
+ return cond.forward_write(self, 'write', data)
+
+ def _cond_close(self):
+ self._close()
+
+
+def process_bytes_config(value):
+ parts = value.split(b',')
+ integers = [int(v) for v in parts if v]
+ return [v if v else None for v in integers]
+
+
+def process_pattern_config(value):
+ patterns = []
+ for p in value.split(b','):
+ if not p:
+ p = None
+ else:
+ p = re.compile(p, re.DOTALL | re.MULTILINE)
+ patterns.append(p)
+ return patterns
+
+
+def extsetup(ui):
+ # Change the base HTTP server class so various events can be performed.
+ # See SocketServer.BaseServer for how the specially named methods work.
+ class badserver(server.MercurialHTTPServer):
+ def __init__(self, ui, *args, **kwargs):
+ self._ui = ui
+ super(badserver, self).__init__(ui, *args, **kwargs)
+
+ all_recv_bytes = self._ui.config(
+ b'badserver', b'close-after-recv-bytes'
+ )
+ all_recv_bytes = process_bytes_config(all_recv_bytes)
+ all_recv_pattern = self._ui.config(
+ b'badserver', b'close-after-recv-patterns'
+ )
+ all_recv_pattern = process_pattern_config(all_recv_pattern)
+ all_send_bytes = self._ui.config(
+ b'badserver', b'close-after-send-bytes'
+ )
+ all_send_bytes = process_bytes_config(all_send_bytes)
+ all_send_patterns = self._ui.config(
+ b'badserver', b'close-after-send-patterns'
+ )
+ all_send_patterns = process_pattern_config(all_send_patterns)
+ self._cond = ConditionTracker(
+ all_recv_bytes,
+ all_recv_pattern,
+ all_send_bytes,
+ all_send_patterns,
+ )
+
+ # Need to inherit object so super() works.
+ class badrequesthandler(self.RequestHandlerClass, object):
+ def send_header(self, name, value):
+ # Make headers deterministic to facilitate testing.
+ if name.lower() == 'date':
+ value = 'Fri, 14 Apr 2017 00:00:00 GMT'
+ elif name.lower() == 'server':
+ value = 'badhttpserver'
+
+ return super(badrequesthandler, self).send_header(
+ name, value
+ )
+
+ self.RequestHandlerClass = badrequesthandler
+
+ # Called to accept() a pending socket.
+ def get_request(self):
+ if self._ui.configbool(b'badserver', b'close-before-accept'):
+ self.socket.close()
+
+ # Tells the server to stop processing more requests.
+ self.__shutdown_request = True
+
+ # Simulate failure to stop processing this request.
+ raise socket.error('close before accept')
+
+ if self._ui.configbool(b'badserver', b'close-after-accept'):
+ request, client_address = super(badserver, self).get_request()
+ request.close()
+ raise socket.error('close after accept')
+
+ return super(badserver, self).get_request()
+
+ # Does heavy lifting of processing a request. Invokes
+ # self.finish_request() which calls self.RequestHandlerClass() which
+ # is a hgweb.server._httprequesthandler.
+ def process_request(self, socket, address):
+ # Wrap socket in a proxy if we need to count bytes.
+ self._cond.start_next_request()
+
+ if self._cond.might_close():
+ socket = socketproxy(
+ socket, self.errorlog, condition_tracked=self._cond
+ )
+
+ return super(badserver, self).process_request(socket, address)
+
+ server.MercurialHTTPServer = badserver
--- a/tests/testlib/crash_transaction_late.py Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/testlib/crash_transaction_late.py Fri Feb 18 14:27:43 2022 +0100
@@ -9,7 +9,6 @@
from mercurial import (
error,
- transaction,
)
@@ -18,14 +17,15 @@
def reposetup(ui, repo):
-
- transaction.postfinalizegenerators.add(b'late-abort')
-
class LateAbortRepo(repo.__class__):
def transaction(self, *args, **kwargs):
tr = super(LateAbortRepo, self).transaction(*args, **kwargs)
tr.addfilegenerator(
- b'late-abort', [b'late-abort'], abort, order=9999999
+ b'late-abort',
+ [b'late-abort'],
+ abort,
+ order=9999999,
+ post_finalize=True,
)
return tr
--- a/tests/testlib/push-checkheads-util.sh Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/testlib/push-checkheads-util.sh Fri Feb 18 14:27:43 2022 +0100
@@ -41,4 +41,10 @@
mkcommit A0
cd ..
hg clone server client
+
+ if [ "$1" = "single-head" ]; then
+ echo >> "server/.hg/hgrc" "[experimental]"
+ echo >> "server/.hg/hgrc" "# enforce a single name per branch"
+ echo >> "server/.hg/hgrc" "single-head-per-branch = yes"
+ fi
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/testlib/stream_clone_setup.sh Fri Feb 18 14:27:43 2022 +0100
@@ -0,0 +1,97 @@
+# setup some files and commit for a good stream clone testing.
+
+touch foo
+hg -q commit -A -m initial
+
+python3 << EOF
+for i in range(1024):
+ with open(str(i), 'wb') as fh:
+ fh.write(b"%d" % i) and None
+EOF
+hg -q commit -A -m 'add a lot of files'
+
+# (the status call is to check for issue5130)
+
+hg st
+
+# add files with "tricky" name:
+
+echo foo > 00changelog.i
+echo foo > 00changelog.d
+echo foo > 00changelog.n
+echo foo > 00changelog-ab349180a0405010.nd
+echo foo > 00manifest.i
+echo foo > 00manifest.d
+echo foo > foo.i
+echo foo > foo.d
+echo foo > foo.n
+echo foo > undo.py
+echo foo > undo.i
+echo foo > undo.d
+echo foo > undo.n
+echo foo > undo.foo.i
+echo foo > undo.foo.d
+echo foo > undo.foo.n
+echo foo > undo.babar
+mkdir savanah
+echo foo > savanah/foo.i
+echo foo > savanah/foo.d
+echo foo > savanah/foo.n
+echo foo > savanah/undo.py
+echo foo > savanah/undo.i
+echo foo > savanah/undo.d
+echo foo > savanah/undo.n
+echo foo > savanah/undo.foo.i
+echo foo > savanah/undo.foo.d
+echo foo > savanah/undo.foo.n
+echo foo > savanah/undo.babar
+mkdir data
+echo foo > data/foo.i
+echo foo > data/foo.d
+echo foo > data/foo.n
+echo foo > data/undo.py
+echo foo > data/undo.i
+echo foo > data/undo.d
+echo foo > data/undo.n
+echo foo > data/undo.foo.i
+echo foo > data/undo.foo.d
+echo foo > data/undo.foo.n
+echo foo > data/undo.babar
+mkdir meta
+echo foo > meta/foo.i
+echo foo > meta/foo.d
+echo foo > meta/foo.n
+echo foo > meta/undo.py
+echo foo > meta/undo.i
+echo foo > meta/undo.d
+echo foo > meta/undo.n
+echo foo > meta/undo.foo.i
+echo foo > meta/undo.foo.d
+echo foo > meta/undo.foo.n
+echo foo > meta/undo.babar
+mkdir store
+echo foo > store/foo.i
+echo foo > store/foo.d
+echo foo > store/foo.n
+echo foo > store/undo.py
+echo foo > store/undo.i
+echo foo > store/undo.d
+echo foo > store/undo.n
+echo foo > store/undo.foo.i
+echo foo > store/undo.foo.d
+echo foo > store/undo.foo.n
+echo foo > store/undo.babar
+
+# Name with special characters
+
+echo foo > store/CélesteVille_is_a_Capital_City
+
+# name causing issue6581
+
+mkdir -p container/isam-build-centos7/
+touch container/isam-build-centos7/bazel-coverage-generator-sandboxfs-compatibility-0758e3e4f6057904d44399bd666faba9e7f40686.patch
+
+# Add all that
+
+hg add .
+hg ci -m 'add files with "tricky" name'
--- a/tests/wireprotohelpers.sh Fri Feb 18 12:55:39 2022 +0100
+++ b/tests/wireprotohelpers.sh Fri Feb 18 14:27:43 2022 +0100
@@ -1,44 +1,23 @@
-HTTPV2=exp-http-v2-0003
MEDIATYPE=application/mercurial-exp-framing-0006
sendhttpraw() {
hg --verbose debugwireproto --peer raw http://$LOCALIP:$HGPORT/
}
-sendhttpv2peer() {
- hg --config experimental.httppeer.v2-encoder-order=identity debugwireproto --nologhandshake --peer http2 http://$LOCALIP:$HGPORT/
-}
-
-sendhttpv2peerverbose() {
- hg --config experimental.httppeer.v2-encoder-order=identity --verbose debugwireproto --nologhandshake --peer http2 http://$LOCALIP:$HGPORT/
-}
-
-sendhttpv2peerhandshake() {
- hg --config experimental.httppeer.v2-encoder-order=identity --verbose debugwireproto --peer http2 http://$LOCALIP:$HGPORT/
-}
-
cat > dummycommands.py << EOF
from mercurial import (
wireprototypes,
wireprotov1server,
- wireprotov2server,
)
@wireprotov1server.wireprotocommand(b'customreadonly', permission=b'pull')
def customreadonlyv1(repo, proto):
return wireprototypes.bytesresponse(b'customreadonly bytes response')
-@wireprotov2server.wireprotocommand(b'customreadonly', permission=b'pull')
-def customreadonlyv2(repo, proto):
- yield b'customreadonly bytes response'
-
@wireprotov1server.wireprotocommand(b'customreadwrite', permission=b'push')
def customreadwrite(repo, proto):
return wireprototypes.bytesresponse(b'customreadwrite bytes response')
-@wireprotov2server.wireprotocommand(b'customreadwrite', permission=b'push')
-def customreadwritev2(repo, proto):
- yield b'customreadwrite bytes response'
EOF
cat >> $HGRCPATH << EOF
@@ -53,20 +32,3 @@
EOF
}
-enablehttpv2() {
- cat >> $1/.hg/hgrc << EOF
-[experimental]
-web.apiserver = true
-web.api.http-v2 = true
-EOF
-}
-
-enablehttpv2client() {
- cat >> $HGRCPATH << EOF
-[experimental]
-httppeer.advertise-v2 = true
-# So tests are in plain text. Also, zstd isn't available in all installs,
-# which would make tests non-deterministic.
-httppeer.v2-encoder-order = identity
-EOF
-}
--- a/tests/wireprotosimplecache.py Fri Feb 18 12:55:39 2022 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,220 +0,0 @@
-# wireprotosimplecache.py - Extension providing in-memory wire protocol cache
-#
-# Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-from __future__ import absolute_import
-
-from mercurial import (
- extensions,
- registrar,
- util,
- wireprotoserver,
- wireprototypes,
- wireprotov2server,
-)
-from mercurial.interfaces import (
- repository,
- util as interfaceutil,
-)
-from mercurial.utils import stringutil
-
-CACHE = None
-
-configtable = {}
-configitem = registrar.configitem(configtable)
-
-configitem(b'simplecache', b'cacheapi', default=False)
-configitem(b'simplecache', b'cacheobjects', default=False)
-configitem(b'simplecache', b'redirectsfile', default=None)
-
-# API handler that makes cached keys available.
-def handlecacherequest(rctx, req, res, checkperm, urlparts):
- if rctx.repo.ui.configbool(b'simplecache', b'cacheobjects'):
- res.status = b'500 Internal Server Error'
- res.setbodybytes(b'cacheobjects not supported for api server')
- return
-
- if not urlparts:
- res.status = b'200 OK'
- res.headers[b'Content-Type'] = b'text/plain'
- res.setbodybytes(b'simple cache server')
- return
-
- key = b'/'.join(urlparts)
-
- if key not in CACHE:
- res.status = b'404 Not Found'
- res.headers[b'Content-Type'] = b'text/plain'
- res.setbodybytes(b'key not found in cache')
- return
-
- res.status = b'200 OK'
- res.headers[b'Content-Type'] = b'application/mercurial-cbor'
- res.setbodybytes(CACHE[key])
-
-
-def cachedescriptor(req, repo):
- return {}
-
-
-wireprotoserver.API_HANDLERS[b'simplecache'] = {
- b'config': (b'simplecache', b'cacheapi'),
- b'handler': handlecacherequest,
- b'apidescriptor': cachedescriptor,
-}
-
-
-@interfaceutil.implementer(repository.iwireprotocolcommandcacher)
-class memorycacher(object):
- def __init__(
- self, ui, command, encodefn, redirecttargets, redirecthashes, req
- ):
- self.ui = ui
- self.encodefn = encodefn
- self.redirecttargets = redirecttargets
- self.redirecthashes = redirecthashes
- self.req = req
- self.key = None
- self.cacheobjects = ui.configbool(b'simplecache', b'cacheobjects')
- self.cacheapi = ui.configbool(b'simplecache', b'cacheapi')
- self.buffered = []
-
- ui.log(b'simplecache', b'cacher constructed for %s\n', command)
-
- def __enter__(self):
- return self
-
- def __exit__(self, exctype, excvalue, exctb):
- if exctype:
- self.ui.log(b'simplecache', b'cacher exiting due to error\n')
-
- def adjustcachekeystate(self, state):
- # Needed in order to make tests deterministic. Don't copy this
- # pattern for production caches!
- del state[b'repo']
-
- def setcachekey(self, key):
- self.key = key
- return True
-
- def lookup(self):
- if self.key not in CACHE:
- self.ui.log(b'simplecache', b'cache miss for %s\n', self.key)
- return None
-
- entry = CACHE[self.key]
- self.ui.log(b'simplecache', b'cache hit for %s\n', self.key)
-
- redirectable = True
-
- if not self.cacheapi:
- redirectable = False
- elif not self.redirecttargets:
- redirectable = False
- else:
- clienttargets = set(self.redirecttargets)
- ourtargets = {t[b'name'] for t in loadredirecttargets(self.ui)}
-
- # We only ever redirect to a single target (for now). So we don't
- # need to store which target matched.
- if not clienttargets & ourtargets:
- redirectable = False
-
- if redirectable:
- paths = self.req.dispatchparts[:-3]
- paths.append(b'simplecache')
- paths.append(self.key)
-
- url = b'%s/%s' % (self.req.baseurl, b'/'.join(paths))
-
- # url = b'http://example.com/%s' % self.key
- self.ui.log(
- b'simplecache',
- b'sending content redirect for %s to ' b'%s\n',
- self.key,
- url,
- )
- response = wireprototypes.alternatelocationresponse(
- url=url, mediatype=b'application/mercurial-cbor'
- )
-
- return {b'objs': [response]}
-
- if self.cacheobjects:
- return {
- b'objs': entry,
- }
- else:
- return {
- b'objs': [wireprototypes.encodedresponse(entry)],
- }
-
- def onobject(self, obj):
- if self.cacheobjects:
- self.buffered.append(obj)
- else:
- self.buffered.extend(self.encodefn(obj))
-
- yield obj
-
- def onfinished(self):
- self.ui.log(b'simplecache', b'storing cache entry for %s\n', self.key)
- if self.cacheobjects:
- CACHE[self.key] = self.buffered
- else:
- CACHE[self.key] = b''.join(self.buffered)
-
- return []
-
-
-def makeresponsecacher(
- orig,
- repo,
- proto,
- command,
- args,
- objencoderfn,
- redirecttargets,
- redirecthashes,
-):
- return memorycacher(
- repo.ui,
- command,
- objencoderfn,
- redirecttargets,
- redirecthashes,
- proto._req,
- )
-
-
-def loadredirecttargets(ui):
- path = ui.config(b'simplecache', b'redirectsfile')
- if not path:
- return []
-
- with open(path, 'rb') as fh:
- s = fh.read()
-
- return stringutil.evalpythonliteral(s)
-
-
-def getadvertisedredirecttargets(orig, repo, proto):
- return loadredirecttargets(repo.ui)
-
-
-def extsetup(ui):
- global CACHE
-
- CACHE = util.lrucachedict(10000)
-
- extensions.wrapfunction(
- wireprotov2server, b'makeresponsecacher', makeresponsecacher
- )
- extensions.wrapfunction(
- wireprotov2server,
- b'getadvertisedredirecttargets',
- getadvertisedredirecttargets,
- )