Mercurial > hg-stable

changeset 42922:f059d6ffcdf0
merge with stable

author:    Augie Fackler <augie@google.com>
date:      Mon, 09 Sep 2019 17:26:17 -0400
parents:   96b22e58dc67 (diff), e26c2440a030 (current diff)
children:  6ccf539aec71
files:     mercurial/merge.py, tests/run-tests.py
diffstat:  265 files changed, 8716 insertions(+), 3874 deletions(-)
--- a/contrib/automation/README.rst	Sat Sep 07 14:35:21 2019 +0100
+++ b/contrib/automation/README.rst	Mon Sep 09 17:26:17 2019 -0400
@@ -181,3 +181,25 @@
 Documenting them is beyond the scope of this document.
 
 Various tests also require other optional dependencies and missing
 dependencies will be printed by the test runner when a test is skipped.
+
+Releasing Windows Artifacts
+===========================
+
+The `automation.py` script can be used to automate the release of Windows
+artifacts::
+
+   $ ./automation.py build-all-windows-packages --revision 5.1.1
+   $ ./automation.py publish-windows-artifacts 5.1.1
+
+The first command will launch an EC2 instance to build all Windows packages
+and copy them into the `dist` directory relative to the repository root. The
+second command will then attempt to upload these files to PyPI (via `twine`)
+and to `mercurial-scm.org` (via SSH).
+
+Uploading to PyPI requires a PyPI account with write access to the `Mercurial`
+package. You can skip PyPI uploading by passing `--no-pypi`.
+
+Uploading to `mercurial-scm.org` requires an SSH account on that server
+with `windows` group membership and for the SSH key for that account to be the
+default SSH key (e.g. `~/.ssh/id_rsa`) or in a running SSH agent. You can
+skip `mercurial-scm.org` uploading by passing `--no-mercurial-scm-org`.
--- a/contrib/automation/hgautomation/aws.py	Sat Sep 07 14:35:21 2019 +0100
+++ b/contrib/automation/hgautomation/aws.py	Mon Sep 09 17:26:17 2019 -0400
@@ -970,7 +970,7 @@
             'DeviceName': image.block_device_mappings[0]['DeviceName'],
             'Ebs': {
                 'DeleteOnTermination': True,
-                'VolumeSize': 8,
+                'VolumeSize': 12,
                 'VolumeType': 'gp2',
             },
         }
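For context, this hunk sits inside a boto3 `BlockDeviceMappings` structure. A
standalone sketch of the same shape, assuming the larger root volume simply
gives builds more disk headroom (the AMI id, region, and instance type below
are placeholders, not values from this patch):

    import boto3

    ec2 = boto3.resource('ec2', region_name='us-west-2')
    image = ec2.Image('ami-0123456789abcdef0')  # hypothetical AMI

    # Same mapping as the patched code, with the root volume grown to 12 GB.
    mappings = [{
        'DeviceName': image.block_device_mappings[0]['DeviceName'],
        'Ebs': {
            'DeleteOnTermination': True,
            'VolumeSize': 12,
            'VolumeType': 'gp2',
        },
    }]

    ec2.create_instances(ImageId=image.id, InstanceType='t3.medium',
                         MinCount=1, MaxCount=1,
                         BlockDeviceMappings=mappings)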
--- a/contrib/automation/hgautomation/cli.py	Sat Sep 07 14:35:21 2019 +0100
+++ b/contrib/automation/hgautomation/cli.py	Mon Sep 09 17:26:17 2019 -0400
@@ -185,6 +185,14 @@
                       test_flags)
 
 
+def publish_windows_artifacts(hg: HGAutomation, aws_region, version: str,
+                              pypi: bool, mercurial_scm_org: bool,
+                              ssh_username: str):
+    windows.publish_artifacts(DIST_PATH, version,
+                              pypi=pypi, mercurial_scm_org=mercurial_scm_org,
+                              ssh_username=ssh_username)
+
+
 def get_parser():
     parser = argparse.ArgumentParser()
 
@@ -403,6 +411,34 @@
     )
     sp.set_defaults(func=run_tests_windows)
 
+    sp = subparsers.add_parser(
+        'publish-windows-artifacts',
+        help='Publish built Windows artifacts (wheels, installers, etc)'
+    )
+    sp.add_argument(
+        '--no-pypi',
+        dest='pypi',
+        action='store_false',
+        default=True,
+        help='Skip uploading to PyPI',
+    )
+    sp.add_argument(
+        '--no-mercurial-scm-org',
+        dest='mercurial_scm_org',
+        action='store_false',
+        default=True,
+        help='Skip uploading to www.mercurial-scm.org',
+    )
+    sp.add_argument(
+        '--ssh-username',
+        help='SSH username for mercurial-scm.org',
+    )
+    sp.add_argument(
+        'version',
+        help='Mercurial version string to locate local packages',
+    )
+    sp.set_defaults(func=publish_windows_artifacts)
+
     return parser
--- a/contrib/automation/hgautomation/linux.py	Sat Sep 07 14:35:21 2019 +0100
+++ b/contrib/automation/hgautomation/linux.py	Mon Sep 09 17:26:17 2019 -0400
@@ -28,11 +28,11 @@
 
 INSTALL_PYTHONS = r'''
 PYENV2_VERSIONS="2.7.16 pypy2.7-7.1.1"
-PYENV3_VERSIONS="3.5.7 3.6.8 3.7.3 3.8-dev pypy3.5-7.0.0 pypy3.6-7.1.1"
+PYENV3_VERSIONS="3.5.7 3.6.9 3.7.4 3.8-dev pypy3.5-7.0.0 pypy3.6-7.1.1"
 
 git clone https://github.com/pyenv/pyenv.git /hgdev/pyenv
 pushd /hgdev/pyenv
-git checkout 3faeda67bb33e07750d1a104271369a7384ca45c
+git checkout 17f44b7cd6f58ea2fa68ec0371fb9e7a826b8be2
 popd
 
 export PYENV_ROOT="/hgdev/pyenv"
@@ -65,6 +65,18 @@
 '''.lstrip().replace('\r\n', '\n')
 
 
+INSTALL_RUST = r'''
+RUSTUP_INIT_SHA256=a46fe67199b7bcbbde2dcbc23ae08db6f29883e260e23899a88b9073effc9076
+wget -O rustup-init --progress dot:mega https://static.rust-lang.org/rustup/archive/1.18.3/x86_64-unknown-linux-gnu/rustup-init
+echo "${RUSTUP_INIT_SHA256} rustup-init" | sha256sum --check -
+
+chmod +x rustup-init
+sudo -H -u hg -g hg ./rustup-init -y
+sudo -H -u hg -g hg /home/hg/.cargo/bin/rustup install 1.31.1 1.34.2
+sudo -H -u hg -g hg /home/hg/.cargo/bin/rustup component add clippy
+'''
+
+
 BOOTSTRAP_VIRTUALENV = r'''
 /usr/bin/virtualenv /hgdev/venv-bootstrap
 
@@ -286,6 +298,8 @@
 # Will be normalized to hg:hg later.
 sudo chown `whoami` /hgdev
 
+{install_rust}
+
 cp requirements-py2.txt /hgdev/requirements-py2.txt
 cp requirements-py3.txt /hgdev/requirements-py3.txt
 
@@ -309,6 +323,7 @@
 
 sudo chown -R hg:hg /hgdev
 '''.lstrip().format(
+    install_rust=INSTALL_RUST,
     install_pythons=INSTALL_PYTHONS,
     bootstrap_virtualenv=BOOTSTRAP_VIRTUALENV
 ).replace('\r\n', '\n')
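The INSTALL_RUST fragment pins rustup-init by SHA-256 before executing it. A
minimal Python sketch of the same verify-before-run pattern, reusing the URL
and digest from the patch:

    import hashlib
    import urllib.request

    RUSTUP_INIT_SHA256 = (
        'a46fe67199b7bcbbde2dcbc23ae08db6f29883e260e23899a88b9073effc9076')
    URL = ('https://static.rust-lang.org/rustup/archive/1.18.3/'
           'x86_64-unknown-linux-gnu/rustup-init')

    # Refuse to write the installer to disk unless the digest matches,
    # mirroring the `sha256sum --check` line in INSTALL_RUST.
    data = urllib.request.urlopen(URL).read()
    if hashlib.sha256(data).hexdigest() != RUSTUP_INIT_SHA256:
        raise SystemExit('rustup-init digest mismatch; aborting')
    with open('rustup-init', 'wb') as fh:
        fh.write(data)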
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/automation/hgautomation/pypi.py	Mon Sep 09 17:26:17 2019 -0400
@@ -0,0 +1,25 @@
+# pypi.py - Automation around PyPI
+#
+# Copyright 2019 Gregory Szorc <gregory.szorc@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+# no-check-code because Python 3 native.
+
+from twine.commands.upload import (
+    upload as twine_upload,
+)
+from twine.settings import (
+    Settings,
+)
+
+
+def upload(paths):
+    """Upload files to PyPI.
+
+    `paths` is an iterable of `pathlib.Path`.
+    """
+    settings = Settings()
+
+    twine_upload(settings, [str(p) for p in paths])
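For reference, a minimal sketch of how this helper might be driven, assuming
the `hgautomation` package is importable and the built wheels already sit in a
local `dist/` directory (the glob pattern is illustrative):

    import pathlib

    from hgautomation.pypi import upload

    # upload() stringifies the Paths before handing them to twine.
    dist = pathlib.Path('dist')
    upload(sorted(dist.glob('mercurial-*.whl')))

With a default `Settings()`, twine resolves credentials from its usual sources
(`~/.pypirc`, environment variables) or prompts interactively.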
--- a/contrib/automation/hgautomation/windows.py	Sat Sep 07 14:35:21 2019 +0100
+++ b/contrib/automation/hgautomation/windows.py	Mon Sep 09 17:26:17 2019 -0400
@@ -7,12 +7,17 @@
 
 # no-check-code because Python 3 native.
 
+import datetime
 import os
+import paramiko
 import pathlib
 import re
 import subprocess
 import tempfile
 
+from .pypi import (
+    upload as pypi_upload,
+)
 from .winrm import (
     run_powershell,
 )
@@ -100,6 +105,26 @@
 }}
 '''
 
+X86_WHEEL_FILENAME = 'mercurial-{version}-cp27-cp27m-win32.whl'
+X64_WHEEL_FILENAME = 'mercurial-{version}-cp27-cp27m-win_amd64.whl'
+X86_EXE_FILENAME = 'Mercurial-{version}.exe'
+X64_EXE_FILENAME = 'Mercurial-{version}-x64.exe'
+X86_MSI_FILENAME = 'mercurial-{version}-x86.msi'
+X64_MSI_FILENAME = 'mercurial-{version}-x64.msi'
+
+MERCURIAL_SCM_BASE_URL = 'https://mercurial-scm.org/release/windows'
+
+X86_USER_AGENT_PATTERN = '.*Windows.*'
+X64_USER_AGENT_PATTERN = '.*Windows.*(WOW|x)64.*'
+
+X86_EXE_DESCRIPTION = ('Mercurial {version} Inno Setup installer - x86 Windows '
+                       '- does not require admin rights')
+X64_EXE_DESCRIPTION = ('Mercurial {version} Inno Setup installer - x64 Windows '
+                       '- does not require admin rights')
+X86_MSI_DESCRIPTION = ('Mercurial {version} MSI installer - x86 Windows '
+                       '- requires admin rights')
+X64_MSI_DESCRIPTION = ('Mercurial {version} MSI installer - x64 Windows '
+                       '- requires admin rights')
 
 def get_vc_prefix(arch):
     if arch == 'x86':
@@ -296,3 +321,152 @@
     )
 
     run_powershell(winrm_client, ps)
+
+
+def resolve_wheel_artifacts(dist_path: pathlib.Path, version: str):
+    return (
+        dist_path / X86_WHEEL_FILENAME.format(version=version),
+        dist_path / X64_WHEEL_FILENAME.format(version=version),
+    )
+
+
+def resolve_all_artifacts(dist_path: pathlib.Path, version: str):
+    return (
+        dist_path / X86_WHEEL_FILENAME.format(version=version),
+        dist_path / X64_WHEEL_FILENAME.format(version=version),
+        dist_path / X86_EXE_FILENAME.format(version=version),
+        dist_path / X64_EXE_FILENAME.format(version=version),
+        dist_path / X86_MSI_FILENAME.format(version=version),
+        dist_path / X64_MSI_FILENAME.format(version=version),
+    )
+
+
+def generate_latest_dat(version: str):
+    x86_exe_filename = X86_EXE_FILENAME.format(version=version)
+    x64_exe_filename = X64_EXE_FILENAME.format(version=version)
+    x86_msi_filename = X86_MSI_FILENAME.format(version=version)
+    x64_msi_filename = X64_MSI_FILENAME.format(version=version)
+
+    entries = (
+        (
+            '10',
+            version,
+            X86_USER_AGENT_PATTERN,
+            '%s/%s' % (MERCURIAL_SCM_BASE_URL, x86_exe_filename),
+            X86_EXE_DESCRIPTION.format(version=version),
+        ),
+        (
+            '10',
+            version,
+            X64_USER_AGENT_PATTERN,
+            '%s/%s' % (MERCURIAL_SCM_BASE_URL, x64_exe_filename),
+            X64_EXE_DESCRIPTION.format(version=version),
+        ),
+        (
+            '10',
+            version,
+            X86_USER_AGENT_PATTERN,
+            '%s/%s' % (MERCURIAL_SCM_BASE_URL, x86_msi_filename),
+            X86_MSI_DESCRIPTION.format(version=version),
+        ),
+        (
+            '10',
+            version,
+            X64_USER_AGENT_PATTERN,
+            '%s/%s' % (MERCURIAL_SCM_BASE_URL, x64_msi_filename),
+            X64_MSI_DESCRIPTION.format(version=version)
+        )
+    )
+
+    lines = ['\t'.join(e) for e in entries]
+
+    return '\n'.join(lines) + '\n'
+
+
+def publish_artifacts_pypi(dist_path: pathlib.Path, version: str):
+    """Publish Windows release artifacts to PyPI."""
+
+    wheel_paths = resolve_wheel_artifacts(dist_path, version)
+
+    for p in wheel_paths:
+        if not p.exists():
+            raise Exception('%s not found' % p)
+
+    print('uploading wheels to PyPI (you may be prompted for credentials)')
+    pypi_upload(wheel_paths)
+
+
+def publish_artifacts_mercurial_scm_org(dist_path: pathlib.Path, version: str,
+                                        ssh_username=None):
+    """Publish Windows release artifacts to mercurial-scm.org."""
+    all_paths = resolve_all_artifacts(dist_path, version)
+
+    for p in all_paths:
+        if not p.exists():
+            raise Exception('%s not found' % p)
+
+    client = paramiko.SSHClient()
+    client.load_system_host_keys()
+    # We assume the system SSH configuration knows how to connect.
+    print('connecting to mercurial-scm.org via ssh...')
+    try:
+        client.connect('mercurial-scm.org', username=ssh_username)
+    except paramiko.AuthenticationException:
+        print('error authenticating; is an SSH key available in an SSH agent?')
+        raise
+
+    print('SSH connection established')
+
+    print('opening SFTP client...')
+    sftp = client.open_sftp()
+    print('SFTP client obtained')
+
+    for p in all_paths:
+        dest_path = '/var/www/release/windows/%s' % p.name
+        print('uploading %s to %s' % (p, dest_path))
+
+        with p.open('rb') as fh:
+            data = fh.read()
+
+        with sftp.open(dest_path, 'wb') as fh:
+            fh.write(data)
+            fh.chmod(0o0664)
+
+    latest_dat_path = '/var/www/release/windows/latest.dat'
+
+    now = datetime.datetime.utcnow()
+    backup_path = dist_path / (
+        'latest-windows-%s.dat' % now.strftime('%Y%m%dT%H%M%S'))
+    print('backing up %s to %s' % (latest_dat_path, backup_path))
+
+    with sftp.open(latest_dat_path, 'rb') as fh:
+        latest_dat_old = fh.read()
+
+    with backup_path.open('wb') as fh:
+        fh.write(latest_dat_old)
+
+    print('writing %s with content:' % latest_dat_path)
+    latest_dat_content = generate_latest_dat(version)
+    print(latest_dat_content)
+
+    with sftp.open(latest_dat_path, 'wb') as fh:
+        fh.write(latest_dat_content.encode('ascii'))
+
+
+def publish_artifacts(dist_path: pathlib.Path, version: str,
+                      pypi=True, mercurial_scm_org=True,
+                      ssh_username=None):
+    """Publish Windows release artifacts.
+
+    Files are found in `dist_path`. We will look for files with version string
+    `version`.
+
+    `pypi` controls whether we upload to PyPI.
+    `mercurial_scm_org` controls whether we upload to mercurial-scm.org.
+    """
+    if pypi:
+        publish_artifacts_pypi(dist_path, version)
+
+    if mercurial_scm_org:
+        publish_artifacts_mercurial_scm_org(dist_path, version,
+                                            ssh_username=ssh_username)
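Note: `generate_latest_dat()` emits one tab-separated row per installer. A
quick way to eyeball its output against the patched module (run under Python 3
in the automation virtualenv, since windows.py imports paramiko at module
scope):

    from hgautomation.windows import generate_latest_dat

    # Row layout: OS version, hg version, user-agent regex, URL, description.
    for row in generate_latest_dat('5.1.1').splitlines():
        print(row.split('\t'))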
--- a/contrib/automation/linux-requirements-py2.txt Sat Sep 07 14:35:21 2019 +0100 +++ b/contrib/automation/linux-requirements-py2.txt Mon Sep 09 17:26:17 2019 -0400 @@ -2,7 +2,7 @@ # This file is autogenerated by pip-compile # To update, run: # -# pip-compile -U --generate-hashes --output-file contrib/automation/linux-requirements-py2.txt contrib/automation/linux-requirements.txt.in +# pip-compile --generate-hashes --output-file=contrib/automation/linux-requirements-py2.txt contrib/automation/linux-requirements.txt.in # astroid==1.6.6 \ --hash=sha256:87de48a92e29cedf7210ffa853d11441e7ad94cb47bacd91b023499b51cbc756 \ @@ -22,10 +22,10 @@ --hash=sha256:509f9419ee91cdd00ba34443217d5ca51f5a364a404e1dce9e8979cea969ca48 \ --hash=sha256:f5260a6e679d2ff42ec91ec5252f4eeffdcf21053db9113bd0a8e4d953769c00 \ # via vcrpy -docutils==0.14 \ - --hash=sha256:02aec4bd92ab067f6ff27a38a38a41173bf01bed8f89157768c1573f53e474a6 \ - --hash=sha256:51e64ef2ebfb29cae1faa133b3710143496eca21c530f3f71424d77687764274 \ - --hash=sha256:7a4bd47eaf6596e1295ecb11361139febe29b084a87bf005bf899f9a42edc3c6 +docutils==0.15.2 \ + --hash=sha256:6c4f696463b79f1fb8ba0c594b63840ebd41f059e92b31957c46b74a4599b6d0 \ + --hash=sha256:9e4d7ecfc600058e07ba661411a2b7de2fd0fafa17d1a7f7361cd47b1175c827 \ + --hash=sha256:a2aeea129088da402665e92e0b25b04b073c04b2dce4ab65caaa38b7ce2e1a99 enum34==1.1.6 \ --hash=sha256:2d81cbbe0e73112bdfe6ef8576f2238f2ba27dd0d55752a776c41d38b7da2850 \ --hash=sha256:644837f692e5f550741432dd3f223bbb9852018674981b1664e5dc339387588a \ @@ -36,83 +36,70 @@ --hash=sha256:330cc27ccbf7f1e992e69fef78261dc7c6569012cf397db8d3de0234e6c937ca \ --hash=sha256:a7bb0f2cf3a3fd1ab2732cb49eba4252c2af4240442415b4abce3b87022a8f50 \ # via mock -futures==3.2.0 \ - --hash=sha256:9ec02aa7d674acb8618afb127e27fde7fc68994c0437ad759fa094a574adb265 \ - --hash=sha256:ec0a6cb848cc212002b9828c3e34c675e0c9ff6741dc445cab6fdd4e1085d1f1 \ +futures==3.3.0 \ + --hash=sha256:49b3f5b064b6e3afc3316421a3f25f66c137ae88f068abbf72830170033c5e16 \ + --hash=sha256:7e033af76a5e35f58e56da7a91e687706faf4e7bdfb2cbc3f2cca6b9bcda9794 \ # via isort fuzzywuzzy==0.17.0 \ --hash=sha256:5ac7c0b3f4658d2743aa17da53a55598144edbc5bee3c6863840636e6926f254 \ --hash=sha256:6f49de47db00e1c71d40ad16da42284ac357936fa9b66bea1df63fed07122d62 -isort==4.3.17 \ - --hash=sha256:01cb7e1ca5e6c5b3f235f0385057f70558b70d2f00320208825fa62887292f43 \ - --hash=sha256:268067462aed7eb2a1e237fcb287852f22077de3fb07964e87e00f829eea2d1a \ +isort==4.3.21 \ + --hash=sha256:54da7e92468955c4fceacd0c86bd0ec997b0e1ee80d97f67c35a78b719dccab1 \ + --hash=sha256:6e811fcb295968434526407adb8796944f1988c5b65e8139058f2014cbe100fd \ # via pylint -lazy-object-proxy==1.3.1 \ - --hash=sha256:0ce34342b419bd8f018e6666bfef729aec3edf62345a53b537a4dcc115746a33 \ - --hash=sha256:1b668120716eb7ee21d8a38815e5eb3bb8211117d9a90b0f8e21722c0758cc39 \ - --hash=sha256:209615b0fe4624d79e50220ce3310ca1a9445fd8e6d3572a896e7f9146bbf019 \ - --hash=sha256:27bf62cb2b1a2068d443ff7097ee33393f8483b570b475db8ebf7e1cba64f088 \ - --hash=sha256:27ea6fd1c02dcc78172a82fc37fcc0992a94e4cecf53cb6d73f11749825bd98b \ - --hash=sha256:2c1b21b44ac9beb0fc848d3993924147ba45c4ebc24be19825e57aabbe74a99e \ - --hash=sha256:2df72ab12046a3496a92476020a1a0abf78b2a7db9ff4dc2036b8dd980203ae6 \ - --hash=sha256:320ffd3de9699d3892048baee45ebfbbf9388a7d65d832d7e580243ade426d2b \ - --hash=sha256:50e3b9a464d5d08cc5227413db0d1c4707b6172e4d4d915c1c70e4de0bbff1f5 \ - --hash=sha256:5276db7ff62bb7b52f77f1f51ed58850e315154249aceb42e7f4c611f0f847ff \ - 
--hash=sha256:61a6cf00dcb1a7f0c773ed4acc509cb636af2d6337a08f362413c76b2b47a8dd \ - --hash=sha256:6ae6c4cb59f199d8827c5a07546b2ab7e85d262acaccaacd49b62f53f7c456f7 \ - --hash=sha256:7661d401d60d8bf15bb5da39e4dd72f5d764c5aff5a86ef52a042506e3e970ff \ - --hash=sha256:7bd527f36a605c914efca5d3d014170b2cb184723e423d26b1fb2fd9108e264d \ - --hash=sha256:7cb54db3535c8686ea12e9535eb087d32421184eacc6939ef15ef50f83a5e7e2 \ - --hash=sha256:7f3a2d740291f7f2c111d86a1c4851b70fb000a6c8883a59660d95ad57b9df35 \ - --hash=sha256:81304b7d8e9c824d058087dcb89144842c8e0dea6d281c031f59f0acf66963d4 \ - --hash=sha256:933947e8b4fbe617a51528b09851685138b49d511af0b6c0da2539115d6d4514 \ - --hash=sha256:94223d7f060301b3a8c09c9b3bc3294b56b2188e7d8179c762a1cda72c979252 \ - --hash=sha256:ab3ca49afcb47058393b0122428358d2fbe0408cf99f1b58b295cfeb4ed39109 \ - --hash=sha256:bd6292f565ca46dee4e737ebcc20742e3b5be2b01556dafe169f6c65d088875f \ - --hash=sha256:cb924aa3e4a3fb644d0c463cad5bc2572649a6a3f68a7f8e4fbe44aaa6d77e4c \ - --hash=sha256:d0fc7a286feac9077ec52a927fc9fe8fe2fabab95426722be4c953c9a8bede92 \ - --hash=sha256:ddc34786490a6e4ec0a855d401034cbd1242ef186c20d79d2166d6a4bd449577 \ - --hash=sha256:e34b155e36fa9da7e1b7c738ed7767fc9491a62ec6af70fe9da4a057759edc2d \ - --hash=sha256:e5b9e8f6bda48460b7b143c3821b21b452cb3a835e6bbd5dd33aa0c8d3f5137d \ - --hash=sha256:e81ebf6c5ee9684be8f2c87563880f93eedd56dd2b6146d8a725b50b7e5adb0f \ - --hash=sha256:eb91be369f945f10d3a49f5f9be8b3d0b93a4c2be8f8a5b83b0571b8123e0a7a \ - --hash=sha256:f460d1ceb0e4a5dcb2a652db0904224f367c9b3c1470d5a7683c0480e582468b \ +lazy-object-proxy==1.4.1 \ + --hash=sha256:159a745e61422217881c4de71f9eafd9d703b93af95618635849fe469a283661 \ + --hash=sha256:23f63c0821cc96a23332e45dfaa83266feff8adc72b9bcaef86c202af765244f \ + --hash=sha256:3b11be575475db2e8a6e11215f5aa95b9ec14de658628776e10d96fa0b4dac13 \ + --hash=sha256:3f447aff8bc61ca8b42b73304f6a44fa0d915487de144652816f950a3f1ab821 \ + --hash=sha256:4ba73f6089cd9b9478bc0a4fa807b47dbdb8fad1d8f31a0f0a5dbf26a4527a71 \ + --hash=sha256:4f53eadd9932055eac465bd3ca1bd610e4d7141e1278012bd1f28646aebc1d0e \ + --hash=sha256:64483bd7154580158ea90de5b8e5e6fc29a16a9b4db24f10193f0c1ae3f9d1ea \ + --hash=sha256:6f72d42b0d04bfee2397aa1862262654b56922c20a9bb66bb76b6f0e5e4f9229 \ + --hash=sha256:7c7f1ec07b227bdc561299fa2328e85000f90179a2f44ea30579d38e037cb3d4 \ + --hash=sha256:7c8b1ba1e15c10b13cad4171cfa77f5bb5ec2580abc5a353907780805ebe158e \ + --hash=sha256:8559b94b823f85342e10d3d9ca4ba5478168e1ac5658a8a2f18c991ba9c52c20 \ + --hash=sha256:a262c7dfb046f00e12a2bdd1bafaed2408114a89ac414b0af8755c696eb3fc16 \ + --hash=sha256:acce4e3267610c4fdb6632b3886fe3f2f7dd641158a843cf6b6a68e4ce81477b \ + --hash=sha256:be089bb6b83fac7f29d357b2dc4cf2b8eb8d98fe9d9ff89f9ea6012970a853c7 \ + --hash=sha256:bfab710d859c779f273cc48fb86af38d6e9210f38287df0069a63e40b45a2f5c \ + --hash=sha256:c10d29019927301d524a22ced72706380de7cfc50f767217485a912b4c8bd82a \ + --hash=sha256:dd6e2b598849b3d7aee2295ac765a578879830fb8966f70be8cd472e6069932e \ + --hash=sha256:e408f1eacc0a68fed0c08da45f31d0ebb38079f043328dce69ff133b95c29dc1 \ # via astroid mccabe==0.6.1 \ --hash=sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42 \ --hash=sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f \ # via pylint -mock==2.0.0 \ - --hash=sha256:5ce3c71c5545b472da17b72268978914d0252980348636840bd34a00b5cc96c1 \ - --hash=sha256:b158b6df76edd239b8208d481dc46b6afd45a846b7812ff0ce58971cf5bc8bba \ +mock==3.0.5 \ + 
--hash=sha256:83657d894c90d5681d62155c82bda9c1187827525880eda8ff5df4ec813437c3 \ + --hash=sha256:d157e52d4e5b938c550f39eb2fd15610db062441a9c2747d3dbfa9298211d0f8 \ # via vcrpy -pbr==5.1.3 \ - --hash=sha256:8257baf496c8522437e8a6cfe0f15e00aedc6c0e0e7c9d55eeeeab31e0853843 \ - --hash=sha256:8c361cc353d988e4f5b998555c88098b9d5964c2e11acf7b0d21925a66bb5824 \ - # via mock pyflakes==2.1.1 \ --hash=sha256:17dbeb2e3f4d772725c777fabc446d5634d1038f234e77343108ce445ea69ce0 \ --hash=sha256:d976835886f8c5b31d47970ed689944a0262b5f3afa00a5a7b4dc81e5449f8a2 -pygments==2.3.1 \ - --hash=sha256:5ffada19f6203563680669ee7f53b64dabbeb100eb51b61996085e99c03b284a \ - --hash=sha256:e8218dd399a61674745138520d0d4cf2621d7e032439341bc3f647bff125818d -pylint==1.9.4 \ - --hash=sha256:02c2b6d268695a8b64ad61847f92e611e6afcff33fd26c3a2125370c4662905d \ - --hash=sha256:ee1e85575587c5b58ddafa25e1c1b01691ef172e139fc25585e5d3f02451da93 +pygments==2.4.2 \ + --hash=sha256:71e430bc85c88a430f000ac1d9b331d2407f681d6f6aec95e8bcfbc3df5b0127 \ + --hash=sha256:881c4c157e45f30af185c1ffe8d549d48ac9127433f2c380c24b84572ad66297 +pylint==1.9.5 \ + --hash=sha256:367e3d49813d349a905390ac27989eff82ab84958731c5ef0bef867452cfdc42 \ + --hash=sha256:97a42df23d436c70132971d1dcb9efad2fe5c0c6add55b90161e773caf729300 python-levenshtein==0.12.0 \ --hash=sha256:033a11de5e3d19ea25c9302d11224e1a1898fe5abd23c61c7c360c25195e3eb1 -pyyaml==5.1 \ - --hash=sha256:1adecc22f88d38052fb787d959f003811ca858b799590a5eaa70e63dca50308c \ - --hash=sha256:436bc774ecf7c103814098159fbb84c2715d25980175292c648f2da143909f95 \ - --hash=sha256:460a5a4248763f6f37ea225d19d5c205677d8d525f6a83357ca622ed541830c2 \ - --hash=sha256:5a22a9c84653debfbf198d02fe592c176ea548cccce47553f35f466e15cf2fd4 \ - --hash=sha256:7a5d3f26b89d688db27822343dfa25c599627bc92093e788956372285c6298ad \ - --hash=sha256:9372b04a02080752d9e6f990179a4ab840227c6e2ce15b95e1278456664cf2ba \ - --hash=sha256:a5dcbebee834eaddf3fa7366316b880ff4062e4bcc9787b78c7fbb4a26ff2dd1 \ - --hash=sha256:aee5bab92a176e7cd034e57f46e9df9a9862a71f8f37cad167c6fc74c65f5b4e \ - --hash=sha256:c51f642898c0bacd335fc119da60baae0824f2cde95b0330b56c0553439f0673 \ - --hash=sha256:c68ea4d3ba1705da1e0d85da6684ac657912679a649e8868bd850d2c299cce13 \ - --hash=sha256:e23d0cc5299223dcc37885dae624f382297717e459ea24053709675a976a3e19 \ +pyyaml==5.1.2 \ + --hash=sha256:0113bc0ec2ad727182326b61326afa3d1d8280ae1122493553fd6f4397f33df9 \ + --hash=sha256:01adf0b6c6f61bd11af6e10ca52b7d4057dd0be0343eb9283c878cf3af56aee4 \ + --hash=sha256:5124373960b0b3f4aa7df1707e63e9f109b5263eca5976c66e08b1c552d4eaf8 \ + --hash=sha256:5ca4f10adbddae56d824b2c09668e91219bb178a1eee1faa56af6f99f11bf696 \ + --hash=sha256:7907be34ffa3c5a32b60b95f4d95ea25361c951383a894fec31be7252b2b6f34 \ + --hash=sha256:7ec9b2a4ed5cad025c2278a1e6a19c011c80a3caaac804fd2d329e9cc2c287c9 \ + --hash=sha256:87ae4c829bb25b9fe99cf71fbb2140c448f534e24c998cc60f39ae4f94396a73 \ + --hash=sha256:9de9919becc9cc2ff03637872a440195ac4241c80536632fffeb6a1e25a74299 \ + --hash=sha256:a5a85b10e450c66b49f98846937e8cfca1db3127a9d5d1e31ca45c3d0bef4c5b \ + --hash=sha256:b0997827b4f6a7c286c01c5f60384d218dca4ed7d9efa945c3e1aa623d5709ae \ + --hash=sha256:b631ef96d3222e62861443cc89d6563ba3eeb816eeb96b2629345ab795e53681 \ + --hash=sha256:bf47c0607522fdbca6c9e817a6e81b08491de50f3766a7a0e6a5be7905961b41 \ + --hash=sha256:f81025eddd0327c7d4cfe9b62cf33190e1e736cc6e97502b3ec425f574b3e7a8 \ # via vcrpy singledispatch==3.4.0.3 \ --hash=sha256:5b06af87df13818d14f08a028e42f566640aef80805c3b50c5056b086e3c2b9c \ @@ -125,6 +112,10 @@ 
vcrpy==2.0.1 \ --hash=sha256:127e79cf7b569d071d1bd761b83f7b62b2ce2a2eb63ceca7aa67cba8f2602ea3 \ --hash=sha256:57be64aa8e9883a4117d0b15de28af62275c001abcdb00b6dc2d4406073d9a4f -wrapt==1.11.1 \ - --hash=sha256:4aea003270831cceb8a90ff27c4031da6ead7ec1886023b80ce0dfe0adf61533 \ +wrapt==1.11.2 \ + --hash=sha256:565a021fd19419476b9362b05eeaa094178de64f8361e44468f9e9d7843901e1 \ # via astroid, vcrpy + +# WARNING: The following packages were not pinned, but pip requires them to be +# pinned when the requirements file includes hashes. Consider using the --allow-unsafe flag. +# setuptools==41.0.1 # via python-levenshtein
--- a/contrib/automation/linux-requirements-py3.txt Sat Sep 07 14:35:21 2019 +0100 +++ b/contrib/automation/linux-requirements-py3.txt Mon Sep 09 17:26:17 2019 -0400 @@ -2,16 +2,16 @@ # This file is autogenerated by pip-compile # To update, run: # -# pip-compile -U --generate-hashes --output-file contrib/automation/linux-requirements-py3.txt contrib/automation/linux-requirements.txt.in +# pip-compile --generate-hashes --output-file=contrib/automation/linux-requirements-py3.txt contrib/automation/linux-requirements.txt.in # astroid==2.2.5 \ --hash=sha256:6560e1e1749f68c64a4b5dee4e091fce798d2f0d84ebe638cf0e0585a343acf4 \ --hash=sha256:b65db1bbaac9f9f4d190199bb8680af6f6f84fd3769a5ea883df8a91fe68b4c4 \ # via pylint -docutils==0.14 \ - --hash=sha256:02aec4bd92ab067f6ff27a38a38a41173bf01bed8f89157768c1573f53e474a6 \ - --hash=sha256:51e64ef2ebfb29cae1faa133b3710143496eca21c530f3f71424d77687764274 \ - --hash=sha256:7a4bd47eaf6596e1295ecb11361139febe29b084a87bf005bf899f9a42edc3c6 +docutils==0.15.2 \ + --hash=sha256:6c4f696463b79f1fb8ba0c594b63840ebd41f059e92b31957c46b74a4599b6d0 \ + --hash=sha256:9e4d7ecfc600058e07ba661411a2b7de2fd0fafa17d1a7f7361cd47b1175c827 \ + --hash=sha256:a2aeea129088da402665e92e0b25b04b073c04b2dce4ab65caaa38b7ce2e1a99 fuzzywuzzy==0.17.0 \ --hash=sha256:5ac7c0b3f4658d2743aa17da53a55598144edbc5bee3c6863840636e6926f254 \ --hash=sha256:6f49de47db00e1c71d40ad16da42284ac357936fa9b66bea1df63fed07122d62 @@ -19,40 +19,29 @@ --hash=sha256:c357b3f628cf53ae2c4c05627ecc484553142ca23264e593d327bcde5e9c3407 \ --hash=sha256:ea8b7f6188e6fa117537c3df7da9fc686d485087abf6ac197f9c46432f7e4a3c \ # via yarl -isort==4.3.17 \ - --hash=sha256:01cb7e1ca5e6c5b3f235f0385057f70558b70d2f00320208825fa62887292f43 \ - --hash=sha256:268067462aed7eb2a1e237fcb287852f22077de3fb07964e87e00f829eea2d1a \ +isort==4.3.21 \ + --hash=sha256:54da7e92468955c4fceacd0c86bd0ec997b0e1ee80d97f67c35a78b719dccab1 \ + --hash=sha256:6e811fcb295968434526407adb8796944f1988c5b65e8139058f2014cbe100fd \ # via pylint -lazy-object-proxy==1.3.1 \ - --hash=sha256:0ce34342b419bd8f018e6666bfef729aec3edf62345a53b537a4dcc115746a33 \ - --hash=sha256:1b668120716eb7ee21d8a38815e5eb3bb8211117d9a90b0f8e21722c0758cc39 \ - --hash=sha256:209615b0fe4624d79e50220ce3310ca1a9445fd8e6d3572a896e7f9146bbf019 \ - --hash=sha256:27bf62cb2b1a2068d443ff7097ee33393f8483b570b475db8ebf7e1cba64f088 \ - --hash=sha256:27ea6fd1c02dcc78172a82fc37fcc0992a94e4cecf53cb6d73f11749825bd98b \ - --hash=sha256:2c1b21b44ac9beb0fc848d3993924147ba45c4ebc24be19825e57aabbe74a99e \ - --hash=sha256:2df72ab12046a3496a92476020a1a0abf78b2a7db9ff4dc2036b8dd980203ae6 \ - --hash=sha256:320ffd3de9699d3892048baee45ebfbbf9388a7d65d832d7e580243ade426d2b \ - --hash=sha256:50e3b9a464d5d08cc5227413db0d1c4707b6172e4d4d915c1c70e4de0bbff1f5 \ - --hash=sha256:5276db7ff62bb7b52f77f1f51ed58850e315154249aceb42e7f4c611f0f847ff \ - --hash=sha256:61a6cf00dcb1a7f0c773ed4acc509cb636af2d6337a08f362413c76b2b47a8dd \ - --hash=sha256:6ae6c4cb59f199d8827c5a07546b2ab7e85d262acaccaacd49b62f53f7c456f7 \ - --hash=sha256:7661d401d60d8bf15bb5da39e4dd72f5d764c5aff5a86ef52a042506e3e970ff \ - --hash=sha256:7bd527f36a605c914efca5d3d014170b2cb184723e423d26b1fb2fd9108e264d \ - --hash=sha256:7cb54db3535c8686ea12e9535eb087d32421184eacc6939ef15ef50f83a5e7e2 \ - --hash=sha256:7f3a2d740291f7f2c111d86a1c4851b70fb000a6c8883a59660d95ad57b9df35 \ - --hash=sha256:81304b7d8e9c824d058087dcb89144842c8e0dea6d281c031f59f0acf66963d4 \ - --hash=sha256:933947e8b4fbe617a51528b09851685138b49d511af0b6c0da2539115d6d4514 \ - 
--hash=sha256:94223d7f060301b3a8c09c9b3bc3294b56b2188e7d8179c762a1cda72c979252 \ - --hash=sha256:ab3ca49afcb47058393b0122428358d2fbe0408cf99f1b58b295cfeb4ed39109 \ - --hash=sha256:bd6292f565ca46dee4e737ebcc20742e3b5be2b01556dafe169f6c65d088875f \ - --hash=sha256:cb924aa3e4a3fb644d0c463cad5bc2572649a6a3f68a7f8e4fbe44aaa6d77e4c \ - --hash=sha256:d0fc7a286feac9077ec52a927fc9fe8fe2fabab95426722be4c953c9a8bede92 \ - --hash=sha256:ddc34786490a6e4ec0a855d401034cbd1242ef186c20d79d2166d6a4bd449577 \ - --hash=sha256:e34b155e36fa9da7e1b7c738ed7767fc9491a62ec6af70fe9da4a057759edc2d \ - --hash=sha256:e5b9e8f6bda48460b7b143c3821b21b452cb3a835e6bbd5dd33aa0c8d3f5137d \ - --hash=sha256:e81ebf6c5ee9684be8f2c87563880f93eedd56dd2b6146d8a725b50b7e5adb0f \ - --hash=sha256:eb91be369f945f10d3a49f5f9be8b3d0b93a4c2be8f8a5b83b0571b8123e0a7a \ - --hash=sha256:f460d1ceb0e4a5dcb2a652db0904224f367c9b3c1470d5a7683c0480e582468b \ +lazy-object-proxy==1.4.1 \ + --hash=sha256:159a745e61422217881c4de71f9eafd9d703b93af95618635849fe469a283661 \ + --hash=sha256:23f63c0821cc96a23332e45dfaa83266feff8adc72b9bcaef86c202af765244f \ + --hash=sha256:3b11be575475db2e8a6e11215f5aa95b9ec14de658628776e10d96fa0b4dac13 \ + --hash=sha256:3f447aff8bc61ca8b42b73304f6a44fa0d915487de144652816f950a3f1ab821 \ + --hash=sha256:4ba73f6089cd9b9478bc0a4fa807b47dbdb8fad1d8f31a0f0a5dbf26a4527a71 \ + --hash=sha256:4f53eadd9932055eac465bd3ca1bd610e4d7141e1278012bd1f28646aebc1d0e \ + --hash=sha256:64483bd7154580158ea90de5b8e5e6fc29a16a9b4db24f10193f0c1ae3f9d1ea \ + --hash=sha256:6f72d42b0d04bfee2397aa1862262654b56922c20a9bb66bb76b6f0e5e4f9229 \ + --hash=sha256:7c7f1ec07b227bdc561299fa2328e85000f90179a2f44ea30579d38e037cb3d4 \ + --hash=sha256:7c8b1ba1e15c10b13cad4171cfa77f5bb5ec2580abc5a353907780805ebe158e \ + --hash=sha256:8559b94b823f85342e10d3d9ca4ba5478168e1ac5658a8a2f18c991ba9c52c20 \ + --hash=sha256:a262c7dfb046f00e12a2bdd1bafaed2408114a89ac414b0af8755c696eb3fc16 \ + --hash=sha256:acce4e3267610c4fdb6632b3886fe3f2f7dd641158a843cf6b6a68e4ce81477b \ + --hash=sha256:be089bb6b83fac7f29d357b2dc4cf2b8eb8d98fe9d9ff89f9ea6012970a853c7 \ + --hash=sha256:bfab710d859c779f273cc48fb86af38d6e9210f38287df0069a63e40b45a2f5c \ + --hash=sha256:c10d29019927301d524a22ced72706380de7cfc50f767217485a912b4c8bd82a \ + --hash=sha256:dd6e2b598849b3d7aee2295ac765a578879830fb8966f70be8cd472e6069932e \ + --hash=sha256:e408f1eacc0a68fed0c08da45f31d0ebb38079f043328dce69ff133b95c29dc1 \ # via astroid mccabe==0.6.1 \ --hash=sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42 \ @@ -92,57 +81,54 @@ pyflakes==2.1.1 \ --hash=sha256:17dbeb2e3f4d772725c777fabc446d5634d1038f234e77343108ce445ea69ce0 \ --hash=sha256:d976835886f8c5b31d47970ed689944a0262b5f3afa00a5a7b4dc81e5449f8a2 -pygments==2.3.1 \ - --hash=sha256:5ffada19f6203563680669ee7f53b64dabbeb100eb51b61996085e99c03b284a \ - --hash=sha256:e8218dd399a61674745138520d0d4cf2621d7e032439341bc3f647bff125818d +pygments==2.4.2 \ + --hash=sha256:71e430bc85c88a430f000ac1d9b331d2407f681d6f6aec95e8bcfbc3df5b0127 \ + --hash=sha256:881c4c157e45f30af185c1ffe8d549d48ac9127433f2c380c24b84572ad66297 pylint==2.3.1 \ --hash=sha256:5d77031694a5fb97ea95e828c8d10fc770a1df6eb3906067aaed42201a8a6a09 \ --hash=sha256:723e3db49555abaf9bf79dc474c6b9e2935ad82230b10c1138a71ea41ac0fff1 python-levenshtein==0.12.0 \ --hash=sha256:033a11de5e3d19ea25c9302d11224e1a1898fe5abd23c61c7c360c25195e3eb1 -pyyaml==5.1 \ - --hash=sha256:1adecc22f88d38052fb787d959f003811ca858b799590a5eaa70e63dca50308c \ - 
--hash=sha256:436bc774ecf7c103814098159fbb84c2715d25980175292c648f2da143909f95 \ - --hash=sha256:460a5a4248763f6f37ea225d19d5c205677d8d525f6a83357ca622ed541830c2 \ - --hash=sha256:5a22a9c84653debfbf198d02fe592c176ea548cccce47553f35f466e15cf2fd4 \ - --hash=sha256:7a5d3f26b89d688db27822343dfa25c599627bc92093e788956372285c6298ad \ - --hash=sha256:9372b04a02080752d9e6f990179a4ab840227c6e2ce15b95e1278456664cf2ba \ - --hash=sha256:a5dcbebee834eaddf3fa7366316b880ff4062e4bcc9787b78c7fbb4a26ff2dd1 \ - --hash=sha256:aee5bab92a176e7cd034e57f46e9df9a9862a71f8f37cad167c6fc74c65f5b4e \ - --hash=sha256:c51f642898c0bacd335fc119da60baae0824f2cde95b0330b56c0553439f0673 \ - --hash=sha256:c68ea4d3ba1705da1e0d85da6684ac657912679a649e8868bd850d2c299cce13 \ - --hash=sha256:e23d0cc5299223dcc37885dae624f382297717e459ea24053709675a976a3e19 \ +pyyaml==5.1.2 \ + --hash=sha256:0113bc0ec2ad727182326b61326afa3d1d8280ae1122493553fd6f4397f33df9 \ + --hash=sha256:01adf0b6c6f61bd11af6e10ca52b7d4057dd0be0343eb9283c878cf3af56aee4 \ + --hash=sha256:5124373960b0b3f4aa7df1707e63e9f109b5263eca5976c66e08b1c552d4eaf8 \ + --hash=sha256:5ca4f10adbddae56d824b2c09668e91219bb178a1eee1faa56af6f99f11bf696 \ + --hash=sha256:7907be34ffa3c5a32b60b95f4d95ea25361c951383a894fec31be7252b2b6f34 \ + --hash=sha256:7ec9b2a4ed5cad025c2278a1e6a19c011c80a3caaac804fd2d329e9cc2c287c9 \ + --hash=sha256:87ae4c829bb25b9fe99cf71fbb2140c448f534e24c998cc60f39ae4f94396a73 \ + --hash=sha256:9de9919becc9cc2ff03637872a440195ac4241c80536632fffeb6a1e25a74299 \ + --hash=sha256:a5a85b10e450c66b49f98846937e8cfca1db3127a9d5d1e31ca45c3d0bef4c5b \ + --hash=sha256:b0997827b4f6a7c286c01c5f60384d218dca4ed7d9efa945c3e1aa623d5709ae \ + --hash=sha256:b631ef96d3222e62861443cc89d6563ba3eeb816eeb96b2629345ab795e53681 \ + --hash=sha256:bf47c0607522fdbca6c9e817a6e81b08491de50f3766a7a0e6a5be7905961b41 \ + --hash=sha256:f81025eddd0327c7d4cfe9b62cf33190e1e736cc6e97502b3ec425f574b3e7a8 \ # via vcrpy six==1.12.0 \ --hash=sha256:3350809f0555b11f552448330d0b52d5f24c91a322ea4a15ef22629740f3761c \ --hash=sha256:d16a0141ec1a18405cd4ce8b4613101da75da0e9a7aec5bdd4fa804d0e0eba73 \ # via astroid, vcrpy -typed-ast==1.3.4 ; python_version >= "3.0" and platform_python_implementation != "PyPy" \ - --hash=sha256:04894d268ba6eab7e093d43107869ad49e7b5ef40d1a94243ea49b352061b200 \ - --hash=sha256:16616ece19daddc586e499a3d2f560302c11f122b9c692bc216e821ae32aa0d0 \ - --hash=sha256:252fdae740964b2d3cdfb3f84dcb4d6247a48a6abe2579e8029ab3be3cdc026c \ - --hash=sha256:2af80a373af123d0b9f44941a46df67ef0ff7a60f95872412a145f4500a7fc99 \ - --hash=sha256:2c88d0a913229a06282b285f42a31e063c3bf9071ff65c5ea4c12acb6977c6a7 \ - --hash=sha256:2ea99c029ebd4b5a308d915cc7fb95b8e1201d60b065450d5d26deb65d3f2bc1 \ - --hash=sha256:3d2e3ab175fc097d2a51c7a0d3fda442f35ebcc93bb1d7bd9b95ad893e44c04d \ - --hash=sha256:4766dd695548a15ee766927bf883fb90c6ac8321be5a60c141f18628fb7f8da8 \ - --hash=sha256:56b6978798502ef66625a2e0f80cf923da64e328da8bbe16c1ff928c70c873de \ - --hash=sha256:5cddb6f8bce14325b2863f9d5ac5c51e07b71b462361fd815d1d7706d3a9d682 \ - --hash=sha256:644ee788222d81555af543b70a1098f2025db38eaa99226f3a75a6854924d4db \ - --hash=sha256:64cf762049fc4775efe6b27161467e76d0ba145862802a65eefc8879086fc6f8 \ - --hash=sha256:68c362848d9fb71d3c3e5f43c09974a0ae319144634e7a47db62f0f2a54a7fa7 \ - --hash=sha256:6c1f3c6f6635e611d58e467bf4371883568f0de9ccc4606f17048142dec14a1f \ - --hash=sha256:b213d4a02eec4ddf622f4d2fbc539f062af3788d1f332f028a2e19c42da53f15 \ - --hash=sha256:bb27d4e7805a7de0e35bd0cb1411bc85f807968b2b0539597a49a23b00a622ae \ - 
--hash=sha256:c9d414512eaa417aadae7758bc118868cd2396b0e6138c1dd4fda96679c079d3 \ - --hash=sha256:f0937165d1e25477b01081c4763d2d9cdc3b18af69cb259dd4f640c9b900fe5e \ - --hash=sha256:fb96a6e2c11059ecf84e6741a319f93f683e440e341d4489c9b161eca251cf2a \ - --hash=sha256:fc71d2d6ae56a091a8d94f33ec9d0f2001d1cb1db423d8b4355debfe9ce689b7 +typed-ast==1.4.0 ; python_version >= "3.0" and platform_python_implementation != "PyPy" \ + --hash=sha256:18511a0b3e7922276346bcb47e2ef9f38fb90fd31cb9223eed42c85d1312344e \ + --hash=sha256:262c247a82d005e43b5b7f69aff746370538e176131c32dda9cb0f324d27141e \ + --hash=sha256:2b907eb046d049bcd9892e3076c7a6456c93a25bebfe554e931620c90e6a25b0 \ + --hash=sha256:354c16e5babd09f5cb0ee000d54cfa38401d8b8891eefa878ac772f827181a3c \ + --hash=sha256:4e0b70c6fc4d010f8107726af5fd37921b666f5b31d9331f0bd24ad9a088e631 \ + --hash=sha256:630968c5cdee51a11c05a30453f8cd65e0cc1d2ad0d9192819df9978984529f4 \ + --hash=sha256:66480f95b8167c9c5c5c87f32cf437d585937970f3fc24386f313a4c97b44e34 \ + --hash=sha256:71211d26ffd12d63a83e079ff258ac9d56a1376a25bc80b1cdcdf601b855b90b \ + --hash=sha256:95bd11af7eafc16e829af2d3df510cecfd4387f6453355188342c3e79a2ec87a \ + --hash=sha256:bc6c7d3fa1325a0c6613512a093bc2a2a15aeec350451cbdf9e1d4bffe3e3233 \ + --hash=sha256:cc34a6f5b426748a507dd5d1de4c1978f2eb5626d51326e43280941206c209e1 \ + --hash=sha256:d755f03c1e4a51e9b24d899561fec4ccaf51f210d52abdf8c07ee2849b212a36 \ + --hash=sha256:d7c45933b1bdfaf9f36c579671fec15d25b06c8398f113dab64c18ed1adda01d \ + --hash=sha256:d896919306dd0aa22d0132f62a1b78d11aaf4c9fc5b3410d3c666b818191630a \ + --hash=sha256:ffde2fbfad571af120fcbfbbc61c72469e72f550d676c3342492a9dfdefb8f12 vcrpy==2.0.1 \ --hash=sha256:127e79cf7b569d071d1bd761b83f7b62b2ce2a2eb63ceca7aa67cba8f2602ea3 \ --hash=sha256:57be64aa8e9883a4117d0b15de28af62275c001abcdb00b6dc2d4406073d9a4f -wrapt==1.11.1 \ - --hash=sha256:4aea003270831cceb8a90ff27c4031da6ead7ec1886023b80ce0dfe0adf61533 \ +wrapt==1.11.2 \ + --hash=sha256:565a021fd19419476b9362b05eeaa094178de64f8361e44468f9e9d7843901e1 \ # via astroid, vcrpy yarl==1.3.0 \ --hash=sha256:024ecdc12bc02b321bc66b41327f930d1c2c543fa9a561b39861da9388ba7aa9 \ @@ -157,3 +143,7 @@ --hash=sha256:c9bb7c249c4432cd47e75af3864bc02d26c9594f49c82e2a28624417f0ae63b8 \ --hash=sha256:e060906c0c585565c718d1c3841747b61c5439af2211e185f6739a9412dfbde1 \ # via vcrpy + +# WARNING: The following packages were not pinned, but pip requires them to be +# pinned when the requirements file includes hashes. Consider using the --allow-unsafe flag. +# setuptools==41.0.1 # via python-levenshtein
--- a/contrib/automation/requirements.txt Sat Sep 07 14:35:21 2019 +0100 +++ b/contrib/automation/requirements.txt Mon Sep 09 17:26:17 2019 -0400 @@ -2,43 +2,44 @@ # This file is autogenerated by pip-compile # To update, run: # -# pip-compile -U --generate-hashes --output-file contrib/automation/requirements.txt contrib/automation/requirements.txt.in +# pip-compile --generate-hashes --output-file=contrib/automation/requirements.txt contrib/automation/requirements.txt.in # asn1crypto==0.24.0 \ --hash=sha256:2f1adbb7546ed199e3c90ef23ec95c5cf3585bac7d11fb7eb562a3fe89c64e87 \ --hash=sha256:9d5c20441baf0cb60a4ac34cc447c6c189024b6b4c6cd7877034f4965c464e49 \ # via cryptography -bcrypt==3.1.6 \ - --hash=sha256:0ba875eb67b011add6d8c5b76afbd92166e98b1f1efab9433d5dc0fafc76e203 \ - --hash=sha256:21ed446054c93e209434148ef0b362432bb82bbdaf7beef70a32c221f3e33d1c \ - --hash=sha256:28a0459381a8021f57230954b9e9a65bb5e3d569d2c253c5cac6cb181d71cf23 \ - --hash=sha256:2aed3091eb6f51c26b7c2fad08d6620d1c35839e7a362f706015b41bd991125e \ - --hash=sha256:2fa5d1e438958ea90eaedbf8082c2ceb1a684b4f6c75a3800c6ec1e18ebef96f \ - --hash=sha256:3a73f45484e9874252002793518da060fb11eaa76c30713faa12115db17d1430 \ - --hash=sha256:3e489787638a36bb466cd66780e15715494b6d6905ffdbaede94440d6d8e7dba \ - --hash=sha256:44636759d222baa62806bbceb20e96f75a015a6381690d1bc2eda91c01ec02ea \ - --hash=sha256:678c21b2fecaa72a1eded0cf12351b153615520637efcadc09ecf81b871f1596 \ - --hash=sha256:75460c2c3786977ea9768d6c9d8957ba31b5fbeb0aae67a5c0e96aab4155f18c \ - --hash=sha256:8ac06fb3e6aacb0a95b56eba735c0b64df49651c6ceb1ad1cf01ba75070d567f \ - --hash=sha256:8fdced50a8b646fff8fa0e4b1c5fd940ecc844b43d1da5a980cb07f2d1b1132f \ - --hash=sha256:9b2c5b640a2da533b0ab5f148d87fb9989bf9bcb2e61eea6a729102a6d36aef9 \ - --hash=sha256:a9083e7fa9adb1a4de5ac15f9097eb15b04e2c8f97618f1b881af40abce382e1 \ - --hash=sha256:b7e3948b8b1a81c5a99d41da5fb2dc03ddb93b5f96fcd3fd27e643f91efa33e1 \ - --hash=sha256:b998b8ca979d906085f6a5d84f7b5459e5e94a13fc27c28a3514437013b6c2f6 \ - --hash=sha256:dd08c50bc6f7be69cd7ba0769acca28c846ec46b7a8ddc2acf4b9ac6f8a7457e \ - --hash=sha256:de5badee458544ab8125e63e39afeedfcf3aef6a6e2282ac159c95ae7472d773 \ - --hash=sha256:ede2a87333d24f55a4a7338a6ccdccf3eaa9bed081d1737e0db4dbd1a4f7e6b6 \ +bcrypt==3.1.7 \ + --hash=sha256:0258f143f3de96b7c14f762c770f5fc56ccd72f8a1857a451c1cd9a655d9ac89 \ + --hash=sha256:0b0069c752ec14172c5f78208f1863d7ad6755a6fae6fe76ec2c80d13be41e42 \ + --hash=sha256:19a4b72a6ae5bb467fea018b825f0a7d917789bcfe893e53f15c92805d187294 \ + --hash=sha256:5432dd7b34107ae8ed6c10a71b4397f1c853bd39a4d6ffa7e35f40584cffd161 \ + --hash=sha256:69361315039878c0680be456640f8705d76cb4a3a3fe1e057e0f261b74be4b31 \ + --hash=sha256:6fe49a60b25b584e2f4ef175b29d3a83ba63b3a4df1b4c0605b826668d1b6be5 \ + --hash=sha256:74a015102e877d0ccd02cdeaa18b32aa7273746914a6c5d0456dd442cb65b99c \ + --hash=sha256:763669a367869786bb4c8fcf731f4175775a5b43f070f50f46f0b59da45375d0 \ + --hash=sha256:8b10acde4e1919d6015e1df86d4c217d3b5b01bb7744c36113ea43d529e1c3de \ + --hash=sha256:9fe92406c857409b70a38729dbdf6578caf9228de0aef5bc44f859ffe971a39e \ + --hash=sha256:a190f2a5dbbdbff4b74e3103cef44344bc30e61255beb27310e2aec407766052 \ + --hash=sha256:a595c12c618119255c90deb4b046e1ca3bcfad64667c43d1166f2b04bc72db09 \ + --hash=sha256:c9457fa5c121e94a58d6505cadca8bed1c64444b83b3204928a866ca2e599105 \ + --hash=sha256:cb93f6b2ab0f6853550b74e051d297c27a638719753eb9ff66d1e4072be67133 \ + --hash=sha256:d7bdc26475679dd073ba0ed2766445bb5b20ca4793ca0db32b399dccc6bc84b7 \ + 
--hash=sha256:ff032765bb8716d9387fd5376d987a937254b0619eff0972779515b5c98820bc \ # via paramiko -boto3==1.9.137 \ - --hash=sha256:882cc4869b47b51dae4b4a900769e72171ff00e0b6bca644b2d7a7ad7378f324 \ - --hash=sha256:cd503a7e7a04f1c14d2801f9727159dfa88c393b4004e98940fa4aa205d920c8 -botocore==1.12.137 \ - --hash=sha256:0d95794f6b1239c75e2c5f966221bcd4b68020fddb5676f757531eedbb612ed8 \ - --hash=sha256:3213cf48cf2ceee10fc3b93221f2cd1c38521cca7584f547d5c086213cc60f35 \ +bleach==3.1.0 \ + --hash=sha256:213336e49e102af26d9cde77dd2d0397afabc5a6bf2fed985dc35b5d1e285a16 \ + --hash=sha256:3fdf7f77adcf649c9911387df51254b813185e32b2c6619f690b593a617e19fa \ + # via readme-renderer +boto3==1.9.223 \ + --hash=sha256:12ceb047c3cfbd2363b35e1c24b082808a1bb9b90f4f0b7375e83d21015bf47b \ + --hash=sha256:6e833a9068309c24d7752e280b2925cf5968a88111bc95fcebc451a09f8b424e +botocore==1.12.223 \ + --hash=sha256:5b943627ad53a6ffb9c1a89c542b30692555ef20996492c6275c65a0e65340c7 \ + --hash=sha256:ce1fa05e241cb8326437a1fef2278e24b56229add6ff71ca2c7e999f33275569 \ # via boto3, s3transfer -certifi==2019.3.9 \ - --hash=sha256:59b7658e26ca9c7339e00f8f4636cdfe59d34fa37b9b04f6f9e9926b3cece1a5 \ - --hash=sha256:b26104d6835d1f5e49452a26eb2ff87fe7090b89dfcaee5ea2212697e1e1d7ae \ +certifi==2019.6.16 \ + --hash=sha256:046832c04d4e752f37383b628bc601a7ea7211496b4638f6514d0e5b9acc4939 \ + --hash=sha256:945e3ba63a0b9f577b1395204e13c3a231f9bc0223888be653286534e5873695 \ # via requests cffi==1.12.3 \ --hash=sha256:041c81822e9f84b1d9c401182e174996f0bae9991f33725d059b771744290774 \ @@ -74,32 +75,29 @@ --hash=sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae \ --hash=sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691 \ # via requests -cryptography==2.6.1 \ - --hash=sha256:066f815f1fe46020877c5983a7e747ae140f517f1b09030ec098503575265ce1 \ - --hash=sha256:210210d9df0afba9e000636e97810117dc55b7157c903a55716bb73e3ae07705 \ - --hash=sha256:26c821cbeb683facb966045e2064303029d572a87ee69ca5a1bf54bf55f93ca6 \ - --hash=sha256:2afb83308dc5c5255149ff7d3fb9964f7c9ee3d59b603ec18ccf5b0a8852e2b1 \ - --hash=sha256:2db34e5c45988f36f7a08a7ab2b69638994a8923853dec2d4af121f689c66dc8 \ - --hash=sha256:409c4653e0f719fa78febcb71ac417076ae5e20160aec7270c91d009837b9151 \ - --hash=sha256:45a4f4cf4f4e6a55c8128f8b76b4c057027b27d4c67e3fe157fa02f27e37830d \ - --hash=sha256:48eab46ef38faf1031e58dfcc9c3e71756a1108f4c9c966150b605d4a1a7f659 \ - --hash=sha256:6b9e0ae298ab20d371fc26e2129fd683cfc0cfde4d157c6341722de645146537 \ - --hash=sha256:6c4778afe50f413707f604828c1ad1ff81fadf6c110cb669579dea7e2e98a75e \ - --hash=sha256:8c33fb99025d353c9520141f8bc989c2134a1f76bac6369cea060812f5b5c2bb \ - --hash=sha256:9873a1760a274b620a135054b756f9f218fa61ca030e42df31b409f0fb738b6c \ - --hash=sha256:9b069768c627f3f5623b1cbd3248c5e7e92aec62f4c98827059eed7053138cc9 \ - --hash=sha256:9e4ce27a507e4886efbd3c32d120db5089b906979a4debf1d5939ec01b9dd6c5 \ - --hash=sha256:acb424eaca214cb08735f1a744eceb97d014de6530c1ea23beb86d9c6f13c2ad \ - --hash=sha256:c8181c7d77388fe26ab8418bb088b1a1ef5fde058c6926790c8a0a3d94075a4a \ - --hash=sha256:d4afbb0840f489b60f5a580a41a1b9c3622e08ecb5eec8614d4fb4cd914c4460 \ - --hash=sha256:d9ed28030797c00f4bc43c86bf819266c76a5ea61d006cd4078a93ebf7da6bfd \ - --hash=sha256:e603aa7bb52e4e8ed4119a58a03b60323918467ef209e6ff9db3ac382e5cf2c6 \ +cryptography==2.7 \ + --hash=sha256:24b61e5fcb506424d3ec4e18bca995833839bf13c59fc43e530e488f28d46b8c \ + --hash=sha256:25dd1581a183e9e7a806fe0543f485103232f940fcfc301db65e630512cce643 \ + 
--hash=sha256:3452bba7c21c69f2df772762be0066c7ed5dc65df494a1d53a58b683a83e1216 \ + --hash=sha256:41a0be220dd1ed9e998f5891948306eb8c812b512dc398e5a01846d855050799 \ + --hash=sha256:5751d8a11b956fbfa314f6553d186b94aa70fdb03d8a4d4f1c82dcacf0cbe28a \ + --hash=sha256:5f61c7d749048fa6e3322258b4263463bfccefecb0dd731b6561cb617a1d9bb9 \ + --hash=sha256:72e24c521fa2106f19623a3851e9f89ddfdeb9ac63871c7643790f872a305dfc \ + --hash=sha256:7b97ae6ef5cba2e3bb14256625423413d5ce8d1abb91d4f29b6d1a081da765f8 \ + --hash=sha256:961e886d8a3590fd2c723cf07be14e2a91cf53c25f02435c04d39e90780e3b53 \ + --hash=sha256:96d8473848e984184b6728e2c9d391482008646276c3ff084a1bd89e15ff53a1 \ + --hash=sha256:ae536da50c7ad1e002c3eee101871d93abdc90d9c5f651818450a0d3af718609 \ + --hash=sha256:b0db0cecf396033abb4a93c95d1602f268b3a68bb0a9cc06a7cff587bb9a7292 \ + --hash=sha256:cfee9164954c186b191b91d4193989ca994703b2fff406f71cf454a2d3c7327e \ + --hash=sha256:e6347742ac8f35ded4a46ff835c60e68c22a536a8ae5c4422966d06946b6d4c6 \ + --hash=sha256:f27d93f0139a3c056172ebb5d4f9056e770fdf0206c2f422ff2ebbad142e09ed \ + --hash=sha256:f57b76e46a58b63d1c6375017f4564a28f19a5ca912691fd2e4261b3414b618d \ # via paramiko, pypsrp -docutils==0.14 \ - --hash=sha256:02aec4bd92ab067f6ff27a38a38a41173bf01bed8f89157768c1573f53e474a6 \ - --hash=sha256:51e64ef2ebfb29cae1faa133b3710143496eca21c530f3f71424d77687764274 \ - --hash=sha256:7a4bd47eaf6596e1295ecb11361139febe29b084a87bf005bf899f9a42edc3c6 \ - # via botocore +docutils==0.15.2 \ + --hash=sha256:6c4f696463b79f1fb8ba0c594b63840ebd41f059e92b31957c46b74a4599b6d0 \ + --hash=sha256:9e4d7ecfc600058e07ba661411a2b7de2fd0fafa17d1a7f7361cd47b1175c827 \ + --hash=sha256:a2aeea129088da402665e92e0b25b04b073c04b2dce4ab65caaa38b7ce2e1a99 \ + # via botocore, readme-renderer idna==2.8 \ --hash=sha256:c357b3f628cf53ae2c4c05627ecc484553142ca23264e593d327bcde5e9c3407 \ --hash=sha256:ea8b7f6188e6fa117537c3df7da9fc686d485087abf6ac197f9c46432f7e4a3c \ @@ -108,20 +106,24 @@ --hash=sha256:3720a4b1bd659dd2eecad0666459b9788813e032b83e7ba58578e48254e0a0e6 \ --hash=sha256:bde2aef6f44302dfb30320115b17d030798de8c4110e28d5cf6cf91a7a31074c \ # via boto3, botocore -ntlm-auth==1.3.0 \ - --hash=sha256:bb2fd03c665f0f62c5f65695b62dcdb07fb7a45df6ebc86c770be2054d6902dd \ - --hash=sha256:ce5b4483ed761f341a538a426a71a52e5a9cf5fd834ebef1d2090f9eef14b3f8 \ +ntlm-auth==1.4.0 \ + --hash=sha256:11f7a3cec38155b7cecdd9bbc8c37cd738d8012f0523b3f98d8caefe394feb97 \ + --hash=sha256:350f2389c8ee5517f47db55a36ac2f8efc9742a60a678d6e2caa92385bdcaa9a \ # via pypsrp -paramiko==2.4.2 \ - --hash=sha256:3c16b2bfb4c0d810b24c40155dbfd113c0521e7e6ee593d704e84b4c658a1f3b \ - --hash=sha256:a8975a7df3560c9f1e2b43dc54ebd40fd00a7017392ca5445ce7df409f900fcb -pyasn1==0.4.5 \ - --hash=sha256:da2420fe13a9452d8ae97a0e478adde1dee153b11ba832a95b223a2ba01c10f7 \ - --hash=sha256:da6b43a8c9ae93bc80e2739efb38cc776ba74a886e3e9318d65fe81a8b8a2c6e \ - # via paramiko +paramiko==2.6.0 \ + --hash=sha256:99f0179bdc176281d21961a003ffdb2ec369daac1a1007241f53374e376576cf \ + --hash=sha256:f4b2edfa0d226b70bd4ca31ea7e389325990283da23465d572ed1f70a7583041 +pkginfo==1.5.0.1 \ + --hash=sha256:7424f2c8511c186cd5424bbf31045b77435b37a8d604990b79d4e70d741148bb \ + --hash=sha256:a6d9e40ca61ad3ebd0b72fbadd4fba16e4c0e4df0428c041e01e06eb6ee71f32 \ + # via twine pycparser==2.19 \ --hash=sha256:a988718abfad80b6b157acce7bf130a30876d27603738ac39f140993246b25b3 \ # via cffi +pygments==2.4.2 \ + --hash=sha256:71e430bc85c88a430f000ac1d9b331d2407f681d6f6aec95e8bcfbc3df5b0127 \ + 
--hash=sha256:881c4c157e45f30af185c1ffe8d549d48ac9127433f2c380c24b84572ad66297 \ + # via readme-renderer pynacl==1.3.0 \ --hash=sha256:05c26f93964373fc0abe332676cb6735f0ecad27711035b9472751faa8521255 \ --hash=sha256:0c6100edd16fefd1557da078c7a31e7b7d7a52ce39fdca2bec29d4f7b6e7600c \ @@ -150,19 +152,42 @@ --hash=sha256:7e6584c74aeed623791615e26efd690f29817a27c73085b78e4bad02493df2fb \ --hash=sha256:c89805f6f4d64db21ed966fda138f8a5ed7a4fdbc1a8ee329ce1b74e3c74da9e \ # via botocore -requests==2.21.0 \ - --hash=sha256:502a824f31acdacb3a35b6690b5fbf0bc41d63a24a45c4004352b0242707598e \ - --hash=sha256:7bf2a778576d825600030a110f3c0e3e8edc51dfaafe1c146e39a2027784957b \ - # via pypsrp -s3transfer==0.2.0 \ - --hash=sha256:7b9ad3213bff7d357f888e0fab5101b56fa1a0548ee77d121c3a3dbfbef4cb2e \ - --hash=sha256:f23d5cb7d862b104401d9021fc82e5fa0e0cf57b7660a1331425aab0c691d021 \ +readme-renderer==24.0 \ + --hash=sha256:bb16f55b259f27f75f640acf5e00cf897845a8b3e4731b5c1a436e4b8529202f \ + --hash=sha256:c8532b79afc0375a85f10433eca157d6b50f7d6990f337fa498c96cd4bfc203d \ + # via twine +requests-toolbelt==0.9.1 \ + --hash=sha256:380606e1d10dc85c3bd47bf5a6095f815ec007be7a8b69c878507068df059e6f \ + --hash=sha256:968089d4584ad4ad7c171454f0a5c6dac23971e9472521ea3b6d49d610aa6fc0 \ + # via twine +requests==2.22.0 \ + --hash=sha256:11e007a8a2aa0323f5a921e9e6a2d7e4e67d9877e85773fba9ba6419025cbeb4 \ + --hash=sha256:9cf5292fcd0f598c671cfc1e0d7d1a7f13bb8085e9a590f48c010551dc6c4b31 \ + # via pypsrp, requests-toolbelt, twine +s3transfer==0.2.1 \ + --hash=sha256:6efc926738a3cd576c2a79725fed9afde92378aa5c6a957e3af010cb019fac9d \ + --hash=sha256:b780f2411b824cb541dbcd2c713d0cb61c7d1bcadae204cdddda2b35cef493ba \ # via boto3 six==1.12.0 \ --hash=sha256:3350809f0555b11f552448330d0b52d5f24c91a322ea4a15ef22629740f3761c \ --hash=sha256:d16a0141ec1a18405cd4ce8b4613101da75da0e9a7aec5bdd4fa804d0e0eba73 \ - # via bcrypt, cryptography, pynacl, pypsrp, python-dateutil -urllib3==1.24.2 \ - --hash=sha256:4c291ca23bbb55c76518905869ef34bdd5f0e46af7afe6861e8375643ffee1a0 \ - --hash=sha256:9a247273df709c4fedb38c711e44292304f73f39ab01beda9f6b9fc375669ac3 \ + # via bcrypt, bleach, cryptography, pynacl, pypsrp, python-dateutil, readme-renderer +tqdm==4.35.0 \ + --hash=sha256:1be3e4e3198f2d0e47b928e9d9a8ec1b63525db29095cec1467f4c5a4ea8ebf9 \ + --hash=sha256:7e39a30e3d34a7a6539378e39d7490326253b7ee354878a92255656dc4284457 \ + # via twine +twine==1.13.0 \ + --hash=sha256:0fb0bfa3df4f62076cab5def36b1a71a2e4acb4d1fa5c97475b048117b1a6446 \ + --hash=sha256:d6c29c933ecfc74e9b1d9fa13aa1f87c5d5770e119f5a4ce032092f0ff5b14dc +urllib3==1.25.3 \ + --hash=sha256:b246607a25ac80bedac05c6f282e3cdaf3afb65420fd024ac94435cabe6e18d1 \ + --hash=sha256:dbe59173209418ae49d485b87d1681aefa36252ee85884c31346debd19463232 \ # via botocore, requests +webencodings==0.5.1 \ + --hash=sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78 \ + --hash=sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923 \ + # via bleach + +# WARNING: The following packages were not pinned, but pip requires them to be +# pinned when the requirements file includes hashes. Consider using the --allow-unsafe flag. +# setuptools==41.2.0 # via twine
--- a/contrib/automation/requirements.txt.in	Sat Sep 07 14:35:21 2019 +0100
+++ b/contrib/automation/requirements.txt.in	Mon Sep 09 17:26:17 2019 -0400
@@ -1,3 +1,4 @@
 boto3
 paramiko
 pypsrp
+twine
--- a/contrib/bdiff-torture.py	Sat Sep 07 14:35:21 2019 +0100
+++ b/contrib/bdiff-torture.py	Mon Sep 09 17:26:17 2019 -0400
@@ -53,8 +53,7 @@
             test1(a, b)
             return
         except Exception as inst:
-            pass
-        print("exception:", inst)
+            print("exception:", inst)
         reducetest(a, b)
 
 def test(a, b):
--- a/contrib/byteify-strings.py	Sat Sep 07 14:35:21 2019 +0100
+++ b/contrib/byteify-strings.py	Mon Sep 09 17:26:17 2019 -0400
@@ -78,23 +78,69 @@
         already been done.
 
         """
-        st = tokens[j]
-        if st.type == token.STRING and st.string.startswith(("'", '"')):
-            sysstrtokens.add(st)
+        k = j
+        currtoken = tokens[k]
+        while currtoken.type in (token.STRING, token.NEWLINE, tokenize.NL):
+            k += 1
+            if (
+                currtoken.type == token.STRING
+                and currtoken.string.startswith(("'", '"'))
+            ):
+                sysstrtokens.add(currtoken)
+            try:
+                currtoken = tokens[k]
+            except IndexError:
+                break
+
+    def _isitemaccess(j):
+        """Assert the next tokens form an item access on `tokens[j]` and that
+        `tokens[j]` is a name.
+        """
+        try:
+            return (
+                tokens[j].type == token.NAME
+                and _isop(j + 1, '[')
+                and tokens[j + 2].type == token.STRING
+                and _isop(j + 3, ']')
+            )
+        except IndexError:
+            return False
+
+    def _ismethodcall(j, *methodnames):
+        """Assert the next tokens form a call to `methodname` with a string
+        as first argument on `tokens[j]` and that `tokens[j]` is a name.
+        """
+        try:
+            return (
+                tokens[j].type == token.NAME
+                and _isop(j + 1, '.')
+                and tokens[j + 2].type == token.NAME
+                and tokens[j + 2].string in methodnames
+                and _isop(j + 3, '(')
+                and tokens[j + 4].type == token.STRING
+            )
+        except IndexError:
+            return False
 
     coldelta = 0  # column increment for new opening parens
     coloffset = -1  # column offset for the current line (-1: TBD)
-    parens = [(0, 0, 0)]  # stack of (line, end-column, column-offset)
+    parens = [(0, 0, 0, -1)]  # stack of (line, end-column, column-offset, type)
+    ignorenextline = False  # don't transform the next line
+    insideignoreblock = False  # don't transform until turned off
     for i, t in enumerate(tokens):
         # Compute the column offset for the current line, such that
         # the current line will be aligned to the last opening paren
         # as before.
         if coloffset < 0:
-            if t.start[1] == parens[-1][1]:
-                coloffset = parens[-1][2]
-            elif t.start[1] + 1 == parens[-1][1]:
+            lastparen = parens[-1]
+            if t.start[1] == lastparen[1]:
+                coloffset = lastparen[2]
+            elif (
+                t.start[1] + 1 == lastparen[1]
+                and lastparen[3] not in (token.NEWLINE, tokenize.NL)
+            ):
                 # fix misaligned indent of s/util.Abort/error.Abort/
-                coloffset = parens[-1][2] + (parens[-1][1] - t.start[1])
+                coloffset = lastparen[2] + (lastparen[1] - t.start[1])
             else:
                 coloffset = 0
 
@@ -103,11 +149,26 @@
             yield adjusttokenpos(t, coloffset)
             coldelta = 0
             coloffset = -1
+            if not insideignoreblock:
+                ignorenextline = (
+                    tokens[i - 1].type == token.COMMENT
+                    and tokens[i - 1].string == "# no-py3-transform"
+                )
+            continue
+
+        if t.type == token.COMMENT:
+            if t.string == "# py3-transform: off":
+                insideignoreblock = True
+            if t.string == "# py3-transform: on":
+                insideignoreblock = False
+
+        if ignorenextline or insideignoreblock:
+            yield adjusttokenpos(t, coloffset)
            continue
 
         # Remember the last paren position.
         if _isop(i, '(', '[', '{'):
-            parens.append(t.end + (coloffset + coldelta,))
+            parens.append(t.end + (coloffset + coldelta, tokens[i + 1].type))
         elif _isop(i, ')', ']', '}'):
             parens.pop()
 
@@ -129,8 +190,10 @@
             # components touching docstrings need to handle unicode,
             # unfortunately.
             if s[0:3] in ("'''", '"""'):
-                yield adjusttokenpos(t, coloffset)
-                continue
+                # If it's assigned to something, it's not a docstring
+                if not _isop(i - 1, '='):
+                    yield adjusttokenpos(t, coloffset)
+                    continue
 
             # If the first character isn't a quote, it is likely a string
             # prefixing character (such as 'b', 'u', or 'r'. Ignore.
@@ -149,8 +212,10 @@
             fn = t.string
 
             # *attr() builtins don't accept byte strings to 2nd argument.
-            if (fn in ('getattr', 'setattr', 'hasattr', 'safehasattr') and
-                    not _isop(i - 1, '.')):
+            if fn in (
+                'getattr', 'setattr', 'hasattr', 'safehasattr', 'wrapfunction',
+                'wrapclass', 'addattr'
+            ) and (opts['allow-attr-methods'] or not _isop(i - 1, '.')):
                 arg1idx = _findargnofcall(1)
                 if arg1idx is not None:
                     _ensuresysstr(arg1idx)
@@ -169,6 +234,12 @@
                 yield adjusttokenpos(t._replace(string=fn[4:]), coloffset)
                 continue
 
+        if t.type == token.NAME and t.string in opts['treat-as-kwargs']:
+            if _isitemaccess(i):
+                _ensuresysstr(i + 2)
+            if _ismethodcall(i, 'get', 'pop', 'setdefault', 'popitem'):
+                _ensuresysstr(i + 4)
+
         # Looks like "if __name__ == '__main__'".
         if (t.type == token.NAME and t.string == '__name__'
             and _isop(i + 1, '==')):
@@ -207,14 +278,23 @@
 
 def main():
     ap = argparse.ArgumentParser()
+    ap.add_argument('--version', action='version',
+                    version='Byteify strings 1.0')
     ap.add_argument('-i', '--inplace', action='store_true', default=False,
                     help='edit files in place')
     ap.add_argument('--dictiter', action='store_true', default=False,
                     help='rewrite iteritems() and itervalues()'),
+    ap.add_argument('--allow-attr-methods', action='store_true',
+                    default=False,
+                    help='also handle attr*() when they are methods'),
+    ap.add_argument('--treat-as-kwargs', nargs="+", default=[],
+                    help="ignore kwargs-like objects"),
    ap.add_argument('files', metavar='FILE', nargs='+', help='source file')
     args = ap.parse_args()
     opts = {
         'dictiter': args.dictiter,
+        'treat-as-kwargs': set(args.treat_as_kwargs),
+        'allow-attr-methods': args.allow_attr_methods,
     }
     for fname in args.files:
         if args.inplace:
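A rough before/after for the new `--treat-as-kwargs` flag (file and variable
names here are made up for illustration):

    # Invoked as:
    #   python3 contrib/byteify-strings.py --treat-as-kwargs opts -i f.py
    #
    # Input:
    #     def dostuff(ui, opts):
    #         rev = opts['rev']          # item access caught by _isitemaccess
    #         default = opts.get('rev')  # method call caught by _ismethodcall
    #         ui.write('hello')
    #
    # Output: ordinary literals gain a b'' prefix, but the keys of the
    # kwargs-like `opts` keep native strings:
    #     def dostuff(ui, opts):
    #         rev = opts['rev']
    #         default = opts.get('rev')
    #         ui.write(b'hello')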
--- a/contrib/check-code.py	Sat Sep 07 14:35:21 2019 +0100
+++ b/contrib/check-code.py	Mon Sep 09 17:26:17 2019 -0400
@@ -116,7 +116,6 @@
     (r'\bls\b.*-\w*R', "don't use 'ls -R', use 'find'"),
     (r'printf.*[^\\]\\([1-9]|0\d)', r"don't use 'printf \NNN', use Python"),
     (r'printf.*[^\\]\\x', "don't use printf \\x, use Python"),
-    (r'\$\(.*\)', "don't use $(expr), use `expr`"),
     (r'rm -rf \*', "don't use naked rm -rf, target a directory"),
     (r'\[[^\]]+==', '[ foo == bar ] is a bashism, use [ foo = bar ] instead'),
     (r'(^|\|\s*)grep (-\w\s+)*[^|]*[(|]\w',
--- a/contrib/genosxversion.py	Sat Sep 07 14:35:21 2019 +0100
+++ b/contrib/genosxversion.py	Mon Sep 09 17:26:17 2019 -0400
@@ -2,14 +2,13 @@
 from __future__ import absolute_import, print_function
 
 import argparse
-import json
 import os
 import subprocess
 import sys
 
 # Always load hg libraries from the hg we can find on $PATH.
-hglib = json.loads(subprocess.check_output(
-    ['hg', 'debuginstall', '-Tjson']))[0]['hgmodules']
+hglib = subprocess.check_output(
+    ['hg', 'debuginstall', '-T', '{hgmodules}'])
 sys.path.insert(0, os.path.dirname(hglib))
 
 from mercurial import util
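The same template trick should work for other `hg debuginstall` keywords. A
small sketch, assuming `hg` is on `$PATH` and that `pythonexe` is exposed as a
template keyword the way `hgmodules` is:

    import subprocess

    # Ask hg for a single keyword via a template instead of parsing -Tjson;
    # the output is the raw value, with no JSON decoding needed.
    out = subprocess.check_output(['hg', 'debuginstall', '-T', '{pythonexe}'])
    print(out.decode())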
--- a/contrib/import-checker.py	Sat Sep 07 14:35:21 2019 +0100
+++ b/contrib/import-checker.py	Mon Sep 09 17:26:17 2019 -0400
@@ -28,9 +28,11 @@
     'mercurial.hgweb.common',
     'mercurial.hgweb.request',
     'mercurial.i18n',
+    'mercurial.interfaces',
     'mercurial.node',
     # for revlog to re-export constant to extensions
     'mercurial.revlogutils.constants',
+    'mercurial.revlogutils.flagutil',
     # for cffi modules to re-export pure functions
     'mercurial.pure.base85',
     'mercurial.pure.bdiff',
--- a/contrib/perf.py	Sat Sep 07 14:35:21 2019 +0100
+++ b/contrib/perf.py	Mon Sep 09 17:26:17 2019 -0400
@@ -126,16 +126,18 @@
     getargspec = pycompat.getargspec  # added to module after 4.5
     _byteskwargs = pycompat.byteskwargs  # since 4.1 (or fbc3f73dc802)
     _sysstr = pycompat.sysstr  # since 4.0 (or 2219f4f82ede)
+    _bytestr = pycompat.bytestr  # since 4.2 (or b70407bd84d5)
     _xrange = pycompat.xrange  # since 4.8 (or 7eba8f83129b)
     fsencode = pycompat.fsencode  # since 3.9 (or f4a5e0e86a7e)
     if pycompat.ispy3:
         _maxint = sys.maxsize  # per py3 docs for replacing maxint
     else:
         _maxint = sys.maxint
-except (ImportError, AttributeError):
+except (NameError, ImportError, AttributeError):
     import inspect
     getargspec = inspect.getargspec
     _byteskwargs = identity
+    _bytestr = str
     fsencode = identity  # no py3 support
     _maxint = sys.maxint  # no py3 support
     _sysstr = lambda x: x  # no py3 support
@@ -144,12 +146,12 @@
 
 try:
     # 4.7+
     queue = pycompat.queue.Queue
-except (AttributeError, ImportError):
+except (NameError, AttributeError, ImportError):
     # <4.7.
     try:
         queue = pycompat.queue
-    except (AttributeError, ImportError):
-        queue = util.queue
+    except (NameError, AttributeError, ImportError):
+        import Queue as queue
 
 try:
     from mercurial import logcmdutil
@@ -241,6 +243,37 @@
     configitem = mercurial.registrar.configitem(configtable)
 
     configitem(b'perf', b'presleep',
         default=mercurial.configitems.dynamicdefault,
+        experimental=True,
+    )
+    configitem(b'perf', b'stub',
+        default=mercurial.configitems.dynamicdefault,
+        experimental=True,
+    )
+    configitem(b'perf', b'parentscount',
+        default=mercurial.configitems.dynamicdefault,
+        experimental=True,
+    )
+    configitem(b'perf', b'all-timing',
+        default=mercurial.configitems.dynamicdefault,
+        experimental=True,
+    )
+    configitem(b'perf', b'pre-run',
+        default=mercurial.configitems.dynamicdefault,
+    )
+    configitem(b'perf', b'profile-benchmark',
+        default=mercurial.configitems.dynamicdefault,
+    )
+    configitem(b'perf', b'run-limits',
+        default=mercurial.configitems.dynamicdefault,
+        experimental=True,
+    )
+except (ImportError, AttributeError):
+    pass
+except TypeError:
+    # compatibility fix for a11fd395e83f
+    # hg version: 5.2
+    configitem(b'perf', b'presleep',
+        default=mercurial.configitems.dynamicdefault,
     )
     configitem(b'perf', b'stub',
         default=mercurial.configitems.dynamicdefault,
@@ -260,8 +293,6 @@
     configitem(b'perf', b'run-limits',
         default=mercurial.configitems.dynamicdefault,
     )
-except (ImportError, AttributeError):
-    pass
 
 def getlen(ui):
     if ui.configbool(b"perf", b"stub", False):
@@ -352,16 +383,16 @@
                       % item))
             continue
         try:
-            time_limit = float(pycompat.sysstr(parts[0]))
+            time_limit = float(_sysstr(parts[0]))
         except ValueError as e:
             ui.warn((b'malformatted run limit entry, %s: %s\n'
-                     % (pycompat.bytestr(e), item)))
+                     % (_bytestr(e), item)))
             continue
         try:
-            run_limit = int(pycompat.sysstr(parts[1]))
+            run_limit = int(_sysstr(parts[1]))
         except ValueError as e:
             ui.warn((b'malformatted run limit entry, %s: %s\n'
-                     % (pycompat.bytestr(e), item)))
+                     % (_bytestr(e), item)))
             continue
         limits.append((time_limit, run_limit))
     if not limits:
@@ -3056,7 +3087,7 @@
 
     def doprogress():
         with ui.makeprogress(topic, total=total) as progress:
-            for i in pycompat.xrange(total):
+            for i in _xrange(total):
                 progress.increment()
 
     timer(doprogress)
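The new `except TypeError` arm is the usual probe-and-fall-back dance for an
API whose signature changed between hg versions. Distilled into a standalone
sketch (the `register` stub stands in for `registrar.configitem`; it raises on
purpose so the fallback path is observable):

    # Try the richer signature first; re-register without the extra keyword
    # when an implementation that doesn't know it rejects the call.
    def register(name, **kwargs):  # stand-in for registrar.configitem
        if 'experimental' in kwargs:
            raise TypeError('unsupported keyword: experimental')

    try:
        register('perf.presleep', experimental=True)
    except TypeError:
        register('perf.presleep')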
--- a/contrib/python3-whitelist Sat Sep 07 14:35:21 2019 +0100 +++ b/contrib/python3-whitelist Mon Sep 09 17:26:17 2019 -0400 @@ -124,6 +124,7 @@ test-convert-hg-sink.t test-convert-hg-source.t test-convert-hg-startrev.t +test-convert-identity.t test-convert-mtn.t test-convert-splicemap.t test-convert-svn-sink.t
--- a/hgext/fix.py	Sat Sep 07 14:35:21 2019 +0100
+++ b/hgext/fix.py	Mon Sep 09 17:26:17 2019 -0400
@@ -36,6 +36,15 @@
 {first}   The 1-based line number of the first line in the modified range
 {last}    The 1-based line number of the last line in the modified range
 
+Deleted sections of a file will be ignored by :linerange, because there is no
+corresponding line range in the version being fixed.
+
+By default, tools that set :linerange will only be executed if there is at least
+one changed line range. This is meant to prevent accidents like running a code
+formatter in such a way that it unexpectedly reformats the whole file. If such a
+tool needs to operate on unchanged files, it should set the :skipclean suboption
+to false.
+
 The :pattern suboption determines which files will be passed through each
 configured tool. See :hg:`help patterns` for possible values. If there are file
 arguments to :hg:`fix`, the intersection of these patterns is used.
@@ -102,6 +111,13 @@
 mapping fixer tool names to lists of metadata values returned from executions
 that modified a file. This aggregates the same metadata previously passed to
 the "postfixfile" hook.
+
+Fixer tools are run in the repository's root directory. This allows them to read
+configuration files from the working copy, or even write to the working copy.
+The working copy is not updated to match the revision being fixed. In fact,
+several revisions may be fixed in parallel. Writes to the working copy are not
+amended into the revision being fixed; fixer tools should always write fixed
+file content back to stdout as documented above.
 """
 
 from __future__ import absolute_import
@@ -119,6 +135,7 @@
 
 from mercurial.utils import (
     procutil,
+    stringutil,
 )
 
 from mercurial import (
@@ -152,10 +169,10 @@
 FIXER_ATTRS = {
     'command': None,
     'linerange': None,
-    'fileset': None,
     'pattern': None,
     'priority': 0,
-    'metadata': False,
+    'metadata': 'false',
+    'skipclean': 'true',
 }
 
 for key, default in FIXER_ATTRS.items():
@@ -233,7 +250,7 @@
         for rev, path in items:
             ctx = repo[rev]
             olddata = ctx[path].data()
-            metadata, newdata = fixfile(ui, opts, fixers, ctx, path,
+            metadata, newdata = fixfile(ui, repo, opts, fixers, ctx, path,
                                         basectxs[rev])
             # Don't waste memory/time passing unchanged content back, but
             # produce one result per item either way.
@@ -530,7 +547,7 @@
             basectxs[rev].add(pctx)
     return basectxs
 
-def fixfile(ui, opts, fixers, fixctx, path, basectxs):
+def fixfile(ui, repo, opts, fixers, fixctx, path, basectxs):
    """Run any configured fixers that should affect the file in this context

    Returns the file content that results from applying the fixers in some order
@@ -539,21 +556,22 @@
    (i.e. they will only avoid lines that are common to all basectxs).

    A fixer tool's stdout will become the file's new content if and only if it
-   exits with code zero.
+   exits with code zero. The fixer tool's working directory is the repository's
+   root.
""" metadata = {} newdata = fixctx[path].data() for fixername, fixer in fixers.iteritems(): if fixer.affects(opts, fixctx, path): - rangesfn = lambda: lineranges(opts, path, basectxs, fixctx, newdata) - command = fixer.command(ui, path, rangesfn) + ranges = lineranges(opts, path, basectxs, fixctx, newdata) + command = fixer.command(ui, path, ranges) if command is None: continue ui.debug('subprocess: %s\n' % (command,)) proc = subprocess.Popen( procutil.tonativestr(command), shell=True, - cwd=procutil.tonativestr(b'/'), + cwd=repo.root, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) @@ -702,14 +720,20 @@ for name in fixernames(ui): fixers[name] = Fixer() attrs = ui.configsuboptions('fix', name)[1] - if 'fileset' in attrs and 'pattern' not in attrs: - ui.warn(_('the fix.tool:fileset config name is deprecated; ' - 'please rename it to fix.tool:pattern\n')) - attrs['pattern'] = attrs['fileset'] for key, default in FIXER_ATTRS.items(): setattr(fixers[name], pycompat.sysstr('_' + key), attrs.get(key, default)) fixers[name]._priority = int(fixers[name]._priority) + fixers[name]._metadata = stringutil.parsebool(fixers[name]._metadata) + fixers[name]._skipclean = stringutil.parsebool(fixers[name]._skipclean) + # Don't use a fixer if it has no pattern configured. It would be + # dangerous to let it affect all files. It would be pointless to let it + # affect no files. There is no reasonable subset of files to use as the + # default. + if fixers[name]._pattern is None: + ui.warn( + _('fixer tool has no pattern configuration: %s\n') % (name,)) + del fixers[name] return collections.OrderedDict( sorted(fixers.items(), key=lambda item: item[1]._priority, reverse=True)) @@ -727,13 +751,14 @@ def affects(self, opts, fixctx, path): """Should this fixer run on the file at the given path and context?""" - return scmutil.match(fixctx, [self._pattern], opts)(path) + return (self._pattern is not None and + scmutil.match(fixctx, [self._pattern], opts)(path)) def shouldoutputmetadata(self): """Should the stdout of this fixer start with JSON and a null byte?""" return self._metadata - def command(self, ui, path, rangesfn): + def command(self, ui, path, ranges): """A shell command to use to invoke this fixer on the given file/lines May return None if there is no appropriate command to run for the given @@ -743,8 +768,7 @@ parts = [expand(ui, self._command, {'rootpath': path, 'basename': os.path.basename(path)})] if self._linerange: - ranges = rangesfn() - if not ranges: + if self._skipclean and not ranges: # No line ranges to fix, so don't run the fixer. return None for first, last in ranges:
--- a/hgext/fsmonitor/__init__.py Sat Sep 07 14:35:21 2019 +0100 +++ b/hgext/fsmonitor/__init__.py Mon Sep 09 17:26:17 2019 -0400 @@ -112,6 +112,7 @@ import os import stat import sys +import tempfile import weakref from mercurial.i18n import _ @@ -166,6 +167,7 @@ ) configitem('fsmonitor', 'verbose', default=True, + experimental=True, ) configitem('experimental', 'fsmonitor.transaction_notify', default=False, @@ -175,6 +177,23 @@ # and will disable itself when encountering one of these: _blacklist = ['largefiles', 'eol'] +def debuginstall(ui, fm): + fm.write("fsmonitor-watchman", + _("fsmonitor checking for watchman binary... (%s)\n"), + ui.configpath("fsmonitor", "watchman_exe")) + root = tempfile.mkdtemp() + c = watchmanclient.client(ui, root) + err = None + try: + v = c.command("version") + fm.write("fsmonitor-watchman-version", + _(" watchman binary version %s\n"), v["version"]) + except watchmanclient.Unavailable as e: + err = str(e) + fm.condwrite(err, "fsmonitor-watchman-error", + _(" watchman binary missing or broken: %s\n"), err) + return 1 if err else 0 + def _handleunavailable(ui, state, ex): """Exception handler for Watchman interaction exceptions""" if isinstance(ex, watchmanclient.Unavailable): @@ -780,7 +799,7 @@ return try: - client = watchmanclient.client(repo) + client = watchmanclient.client(repo.ui, repo._root) except Exception as ex: _handleunavailable(ui, fsmonitorstate, ex) return
--- a/hgext/fsmonitor/watchmanclient.py Sat Sep 07 14:35:21 2019 +0100 +++ b/hgext/fsmonitor/watchmanclient.py Mon Sep 09 17:26:17 2019 -0400 @@ -33,12 +33,12 @@ super(WatchmanNoRoot, self).__init__(msg) class client(object): - def __init__(self, repo, timeout=1.0): + def __init__(self, ui, root, timeout=1.0): err = None if not self._user: err = "couldn't get user" warn = True - if self._user in repo.ui.configlist('fsmonitor', 'blacklistusers'): + if self._user in ui.configlist('fsmonitor', 'blacklistusers'): err = 'user %s in blacklist' % self._user warn = False @@ -47,8 +47,8 @@ self._timeout = timeout self._watchmanclient = None - self._root = repo.root - self._ui = repo.ui + self._root = root + self._ui = ui self._firsttime = True def settimeout(self, timeout):
--- a/hgext/largefiles/overrides.py Sat Sep 07 14:35:21 2019 +0100 +++ b/hgext/largefiles/overrides.py Mon Sep 09 17:26:17 2019 -0400 @@ -459,7 +459,7 @@ lfiles = set() for f in actions: splitstandin = lfutil.splitstandin(f) - if splitstandin in p1: + if splitstandin is not None and splitstandin in p1: lfiles.add(splitstandin) elif lfutil.standin(f) in p1: lfiles.add(f)
--- a/hgext/lfs/__init__.py Sat Sep 07 14:35:21 2019 +0100 +++ b/hgext/lfs/__init__.py Mon Sep 09 17:26:17 2019 -0400 @@ -141,13 +141,16 @@ minifileset, node, pycompat, - repository, revlog, scmutil, templateutil, util, ) +from mercurial.interfaces import ( + repository, +) + from . import ( blobstore, wireprotolfsserver,
--- a/hgext/lfs/wrapper.py Sat Sep 07 14:35:21 2019 +0100 +++ b/hgext/lfs/wrapper.py Mon Sep 09 17:26:17 2019 -0400 @@ -21,7 +21,6 @@ exchange, exthelper, localrepo, - repository, revlog, scmutil, upgrade, @@ -30,6 +29,10 @@ wireprotov1server, ) +from mercurial.interfaces import ( + repository, +) + from mercurial.utils import ( storageutil, stringutil, @@ -169,7 +172,7 @@ # Wrapping may also be applied by remotefilelog def filelogrenamed(orig, self, node): if _islfs(self, node): - rawtext = self._revlog.revision(node, raw=True) + rawtext = self._revlog.rawdata(node) if not rawtext: return False metadata = pointer.deserialize(rawtext) @@ -183,7 +186,7 @@ def filelogsize(orig, self, rev): if _islfs(self, rev=rev): # fast path: use lfs metadata to answer size - rawtext = self._revlog.revision(rev, raw=True) + rawtext = self._revlog.rawdata(rev) metadata = pointer.deserialize(rawtext) return int(metadata['size']) return orig(self, rev)
--- a/hgext/narrow/__init__.py Sat Sep 07 14:35:21 2019 +0100 +++ b/hgext/narrow/__init__.py Mon Sep 09 17:26:17 2019 -0400 @@ -17,6 +17,9 @@ from mercurial import ( localrepo, registrar, +) + +from mercurial.interfaces import ( repository, )
--- a/hgext/narrow/narrowbundle2.py Sat Sep 07 14:35:21 2019 +0100 +++ b/hgext/narrow/narrowbundle2.py Mon Sep 09 17:26:17 2019 -0400 @@ -23,10 +23,12 @@ localrepo, narrowspec, repair, - repository, util, wireprototypes, ) +from mercurial.interfaces import ( + repository, +) from mercurial.utils import ( stringutil, ) @@ -236,16 +238,16 @@ f = vfs.open(chgrpfile, "rb") try: gen = exchange.readbundle(ui, f, chgrpfile, vfs) - if not ui.verbose: - # silence internal shuffling chatter - ui.pushbuffer() - if isinstance(gen, bundle2.unbundle20): - with repo.transaction('strip') as tr: - bundle2.processbundle(repo, gen, lambda: tr) - else: - gen.apply(repo, 'strip', 'bundle:' + vfs.join(chgrpfile), True) - if not ui.verbose: - ui.popbuffer() + # silence internal shuffling chatter + override = {('ui', 'quiet'): True} + if ui.verbose: + override = {} + with ui.configoverride(override): + if isinstance(gen, bundle2.unbundle20): + with repo.transaction('strip') as tr: + bundle2.processbundle(repo, gen, lambda: tr) + else: + gen.apply(repo, 'strip', 'bundle:' + vfs.join(chgrpfile), True) finally: f.close()
--- a/hgext/narrow/narrowcommands.py Sat Sep 07 14:35:21 2019 +0100 +++ b/hgext/narrow/narrowcommands.py Mon Sep 09 17:26:17 2019 -0400 @@ -25,12 +25,14 @@ pycompat, registrar, repair, - repository, repoview, sparse, util, wireprototypes, ) +from mercurial.interfaces import ( + repository, +) table = {} command = registrar.command(table) @@ -368,7 +370,7 @@ opts = pycompat.byteskwargs(opts) if repository.NARROW_REQUIREMENT not in repo.requirements: raise error.Abort(_('the tracked command is only supported on ' - 'respositories cloned with --narrow')) + 'repositories cloned with --narrow')) # Before supporting, decide whether it "hg tracked --clear" should mean # tracking no paths or all paths.
--- a/hgext/notify.py Sat Sep 07 14:35:21 2019 +0100 +++ b/hgext/notify.py Mon Sep 09 17:26:17 2019 -0400 @@ -82,6 +82,12 @@ notify.domain Default email domain for sender or recipients with no explicit domain. + It is also used for the domain part of the ``Message-Id`` when using + ``notify.messageidseed``. + +notify.messageidseed + Create deterministic ``Message-Id`` headers for the mails based on the seed + and the revision identifier of the first commit in the changeset. notify.style Style file to use when formatting emails. @@ -144,6 +150,7 @@ import email.errors as emailerrors import email.parser as emailparser import fnmatch +import hashlib import socket import time @@ -183,6 +190,9 @@ configitem('notify', 'domain', default=None, ) +configitem('notify', 'messageidseed', + default=None, +) configitem('notify', 'fromauthor', default=None, ) @@ -268,6 +278,7 @@ self.subs = self.subscribers() self.merge = self.ui.configbool('notify', 'merge') self.showfunc = self.ui.configbool('notify', 'showfunc') + self.messageidseed = self.ui.config('notify', 'messageidseed') if self.showfunc is None: self.showfunc = self.ui.configbool('diff', 'showfunc') @@ -412,10 +423,7 @@ msg[r'X-Hg-Notification'] = r'changeset %s' % ctx if not msg[r'Message-Id']: - msg[r'Message-Id'] = encoding.strfromlocal( - '<hg.%s.%d.%d@%s>' % (ctx, int(time.time()), - hash(self.repo.root), - encoding.strtolocal(socket.getfqdn()))) + msg[r'Message-Id'] = messageid(ctx, self.domain, self.messageidseed) msg[r'To'] = encoding.strfromlocal(', '.join(sorted(subs))) msgtext = encoding.strtolocal(msg.as_string()) @@ -517,3 +525,16 @@ if count: n.send(ctx, count, data) + +def messageid(ctx, domain, messageidseed): + if domain and messageidseed: + host = domain + else: + host = encoding.strtolocal(socket.getfqdn()) + if messageidseed: + messagehash = hashlib.sha512(ctx.hex() + messageidseed) + messageid = '<hg.%s@%s>' % (messagehash.hexdigest()[:64], host) + else: + messageid = '<hg.%s.%d.%d@%s>' % (ctx, int(time.time()), + hash(ctx.repo().root), host) + return encoding.strfromlocal(messageid)
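A hedged sketch of the deterministic ``Message-Id`` computation added above: the sha512 digest of the changeset hex concatenated with the seed, truncated to 64 hex digits. The changeset hash, seed, and domain below are made up::

    import hashlib

    ctxhex = b'a' * 40              # stand-in for ctx.hex()
    seed = b'my-seed'               # notify.messageidseed
    host = 'lists.example.org'      # notify.domain
    digest = hashlib.sha512(ctxhex + seed).hexdigest()[:64]
    messageid = '<hg.%s@%s>' % (digest, host)

Because the id depends only on the changeset and the configured seed, resending a notification for the same commit threads correctly in mail clients, unlike the old time-based scheme.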
--- a/hgext/remotefilelog/__init__.py Sat Sep 07 14:35:21 2019 +0100 +++ b/hgext/remotefilelog/__init__.py Mon Sep 09 17:26:17 2019 -0400 @@ -219,11 +219,13 @@ configitem('remotefilelog', 'gcrepack', default=False) configitem('remotefilelog', 'repackonhggc', default=False) -configitem('repack', 'chainorphansbysize', default=True) +configitem('repack', 'chainorphansbysize', default=True, experimental=True) configitem('packs', 'maxpacksize', default=0) configitem('packs', 'maxchainlen', default=1000) +configitem('devel', 'remotefilelog.ensurestart', default=False) + # default TTL limit is 30 days _defaultlimit = 60 * 60 * 24 * 30 configitem('remotefilelog', 'nodettl', default=_defaultlimit) @@ -949,19 +951,23 @@ prefetchrevset = ui.config('remotefilelog', 'pullprefetch') bgrepack = repo.ui.configbool('remotefilelog', 'backgroundrepack') bgprefetch = repo.ui.configbool('remotefilelog', 'backgroundprefetch') + ensurestart = repo.ui.configbool('devel', 'remotefilelog.ensurestart') if prefetchrevset: ui.status(_("prefetching file contents\n")) revs = scmutil.revrange(repo, [prefetchrevset]) base = repo['.'].rev() if bgprefetch: - repo.backgroundprefetch(prefetchrevset, repack=bgrepack) + repo.backgroundprefetch(prefetchrevset, repack=bgrepack, + ensurestart=ensurestart) else: repo.prefetch(revs, base=base) if bgrepack: - repackmod.backgroundrepack(repo, incremental=True) + repackmod.backgroundrepack(repo, incremental=True, + ensurestart=ensurestart) elif bgrepack: - repackmod.backgroundrepack(repo, incremental=True) + repackmod.backgroundrepack(repo, incremental=True, + ensurestart=ensurestart) return result @@ -1085,9 +1091,12 @@ revs = scmutil.revrange(repo, opts.get('rev')) repo.prefetch(revs, opts.get('base'), pats, opts) + ensurestart = repo.ui.configbool('devel', 'remotefilelog.ensurestart') + # Run repack in background if opts.get('repack'): - repackmod.backgroundrepack(repo, incremental=True) + repackmod.backgroundrepack(repo, incremental=True, + ensurestart=ensurestart) @command('repack', [ ('', 'background', None, _('run in a background process'), None), @@ -1096,8 +1105,10 @@ ], _('hg repack [OPTIONS]')) def repack_(ui, repo, *pats, **opts): if opts.get(r'background'): + ensurestart = repo.ui.configbool('devel', 'remotefilelog.ensurestart') repackmod.backgroundrepack(repo, incremental=opts.get(r'incremental'), - packsonly=opts.get(r'packsonly', False)) + packsonly=opts.get(r'packsonly', False), + ensurestart=ensurestart) return options = {'packsonly': opts.get(r'packsonly')}
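The new ``devel.remotefilelog.ensurestart`` knob is threaded down to ``procutil.runbgcommand()``; when true, the caller blocks until the background process has actually started, which makes tests deterministic. A sketch of the call path, assuming ``repo`` is a loaded repository::

    from mercurial import encoding
    from mercurial.utils import procutil

    ensurestart = repo.ui.configbool('devel', 'remotefilelog.ensurestart')
    cmd = [procutil.hgexecutable(), '-R', repo.origroot, 'repack']
    # wait for the child to start only when the devel knob asks for it
    procutil.runbgcommand(cmd, encoding.environ, ensurestart=ensurestart)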
--- a/hgext/remotefilelog/contentstore.py Sat Sep 07 14:35:21 2019 +0100 +++ b/hgext/remotefilelog/contentstore.py Mon Sep 09 17:26:17 2019 -0400 @@ -264,7 +264,7 @@ self._repackstartlinkrev = 0 def get(self, name, node): - return self._revlog(name).revision(node, raw=True) + return self._revlog(name).rawdata(node) def getdelta(self, name, node): revision = self.get(name, node)
--- a/hgext/remotefilelog/fileserverclient.py Sat Sep 07 14:35:21 2019 +0100 +++ b/hgext/remotefilelog/fileserverclient.py Mon Sep 09 17:26:17 2019 -0400 @@ -569,7 +569,7 @@ node = bin(id) rlog = self.repo.file(file) if rlog.flags(node) & revlog.REVIDX_EXTSTORED: - text = rlog.revision(node, raw=True) + text = rlog.rawdata(node) p = _lfsmod.pointer.deserialize(text) oid = p.oid() if not store.has(oid):
--- a/hgext/remotefilelog/remotefilelog.py Sat Sep 07 14:35:21 2019 +0100 +++ b/hgext/remotefilelog/remotefilelog.py Mon Sep 09 17:26:17 2019 -0400 @@ -24,6 +24,7 @@ revlog, ) from mercurial.utils import storageutil +from mercurial.revlogutils import flagutil from . import ( constants, @@ -45,7 +46,7 @@ raise KeyError(node) return node -class remotefilelog(object): +class remotefilelog(flagutil.flagprocessorsmixin): _generaldelta = True @@ -57,6 +58,8 @@ self.version = 1 + self._flagprocessors = dict(flagutil.flagprocessors) + def read(self, node): """returns the file contents at this node""" t = self.revision(node) @@ -132,7 +135,7 @@ node = storageutil.hashrevisionsha1(text, p1, p2) meta, metaoffset = storageutil.parsemeta(text) - rawtext, validatehash = self._processflags(text, flags, 'write') + rawtext, validatehash = self._processflagswrite(text, flags) return self.addrawrevision(rawtext, transaction, linknode, p1, p2, node, flags, cachedelta, _metatuple=(meta, metaoffset)) @@ -262,7 +265,7 @@ revision = None delta = self.revdiff(basenode, node) else: - revision = self.revision(node, raw=True) + revision = self.rawdata(node) delta = None yield revlog.revlogrevisiondelta( node=node, @@ -277,8 +280,8 @@ ) def revdiff(self, node1, node2): - return mdiff.textdiff(self.revision(node1, raw=True), - self.revision(node2, raw=True)) + return mdiff.textdiff(self.rawdata(node1), + self.rawdata(node2)) def lookup(self, node): if len(node) == 40: @@ -321,30 +324,11 @@ flags = store.getmeta(self.filename, node).get(constants.METAKEYFLAG, 0) if flags == 0: return rawtext - text, verifyhash = self._processflags(rawtext, flags, 'read') + text, verifyhash = self._processflagsread(rawtext, flags) return text - def _processflags(self, text, flags, operation, raw=False): - # mostly copied from hg/mercurial/revlog.py - validatehash = True - orderedflags = revlog.REVIDX_FLAGS_ORDER - if operation == 'write': - orderedflags = reversed(orderedflags) - for flag in orderedflags: - if flag & flags: - vhash = True - if flag not in revlog._flagprocessors: - message = _("missing processor for flag '%#x'") % (flag) - raise revlog.RevlogError(message) - readfunc, writefunc, rawfunc = revlog._flagprocessors[flag] - if raw: - vhash = rawfunc(self, text) - elif operation == 'read': - text, vhash = readfunc(self, text) - elif operation == 'write': - text, vhash = writefunc(self, text) - validatehash = validatehash and vhash - return text, validatehash + def rawdata(self, node): + return self.revision(node, raw=False) def _read(self, id): """reads the raw file blob from disk, cache, or server"""
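The hand-copied ``_processflags()`` loop is replaced by the shared mixin from ``mercurial.revlogutils.flagutil``. A minimal sketch of how a storage class adopts it (the class name is hypothetical)::

    from mercurial.revlogutils import flagutil

    class mystore(flagutil.flagprocessorsmixin):
        def __init__(self):
            # per-instance registry, seeded from the global processors
            self._flagprocessors = dict(flagutil.flagprocessors)

Reads then go through ``self._processflagsread(rawtext, flags)`` and writes through ``self._processflagswrite(text, flags)``, exactly as the diff shows, so flag-processing behavior stays in one place instead of being copied from revlog.py.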
--- a/hgext/remotefilelog/remotefilelogserver.py Sat Sep 07 14:35:21 2019 +0100 +++ b/hgext/remotefilelog/remotefilelogserver.py Mon Sep 09 17:26:17 2019 -0400 @@ -335,7 +335,7 @@ text = filectx.data() else: # lfs, read raw revision data - text = flog.revision(frev, raw=True) + text = flog.rawdata(frev) repo = filectx._repo
--- a/hgext/remotefilelog/repack.py Sat Sep 07 14:35:21 2019 +0100 +++ b/hgext/remotefilelog/repack.py Mon Sep 09 17:26:17 2019 -0400 @@ -34,7 +34,8 @@ class RepackAlreadyRunning(error.Abort): pass -def backgroundrepack(repo, incremental=True, packsonly=False): +def backgroundrepack(repo, incremental=True, packsonly=False, + ensurestart=False): cmd = [procutil.hgexecutable(), '-R', repo.origroot, 'repack'] msg = _("(running background repack)\n") if incremental: @@ -44,7 +45,7 @@ cmd.append('--packsonly') repo.ui.warn(msg) # We know this command will find a binary, so don't block on it starting. - procutil.runbgcommand(cmd, encoding.environ, ensurestart=False) + procutil.runbgcommand(cmd, encoding.environ, ensurestart=ensurestart) def fullrepack(repo, options=None): """If ``packsonly`` is True, stores creating only loose objects are skipped.
--- a/hgext/remotefilelog/shallowbundle.py Sat Sep 07 14:35:21 2019 +0100 +++ b/hgext/remotefilelog/shallowbundle.py Mon Sep 09 17:26:17 2019 -0400 @@ -124,7 +124,7 @@ def nodechunk(self, revlog, node, prevnode, linknode): prefix = '' if prevnode == nullid: - delta = revlog.revision(node, raw=True) + delta = revlog.rawdata(node) prefix = mdiff.trivialdiffheader(len(delta)) else: # Actually uses remotefilelog.revdiff which works on nodes, not revs @@ -267,7 +267,7 @@ if not available(f, node, f, deltabase): continue - base = fl.revision(deltabase, raw=True) + base = fl.rawdata(deltabase) text = mdiff.patch(base, delta) if not isinstance(text, bytes): text = bytes(text)
--- a/hgext/remotefilelog/shallowrepo.py Sat Sep 07 14:35:21 2019 +0100 +++ b/hgext/remotefilelog/shallowrepo.py Mon Sep 09 17:26:17 2019 -0400 @@ -183,7 +183,7 @@ origctx=origctx) def backgroundprefetch(self, revs, base=None, repack=False, pats=None, - opts=None): + opts=None, ensurestart=False): """Runs prefetch in background with optional repack """ cmd = [procutil.hgexecutable(), '-R', repo.origroot, 'prefetch'] @@ -193,7 +193,8 @@ cmd += ['-r', revs] # We know this command will find a binary, so don't block # on it starting. - procutil.runbgcommand(cmd, encoding.environ, ensurestart=False) + procutil.runbgcommand(cmd, encoding.environ, + ensurestart=ensurestart) def prefetch(self, revs, base=None, pats=None, opts=None): """Prefetches all the necessary file revisions for the given revs
--- a/hgext/sqlitestore.py Sat Sep 07 14:35:21 2019 +0100 +++ b/hgext/sqlitestore.py Mon Sep 09 17:26:17 2019 -0400 @@ -70,12 +70,14 @@ mdiff, pycompat, registrar, - repository, util, verify, ) +from mercurial.interfaces import ( + repository, + util as interfaceutil, +) from mercurial.utils import ( - interfaceutil, storageutil, ) @@ -90,7 +92,8 @@ # experimental config: storage.sqlite.compression configitem('storage', 'sqlite.compression', - default='zstd' if zstd else 'zlib') + default='zstd' if zstd else 'zlib', + experimental=True) # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should @@ -549,6 +552,9 @@ return fulltext + def rawdata(self, *args, **kwargs): + return self.revision(*args, **kwargs) + def read(self, node): return storageutil.filtermetadata(self.revision(node)) @@ -653,8 +659,7 @@ # patch operation. if baserev != nullrev and self.iscensored(baserev): hlen = struct.calcsize('>lll') - oldlen = len(self.revision(deltabase, raw=True, - _verifyhash=False)) + oldlen = len(self.rawdata(deltabase, _verifyhash=False)) newlen = len(delta) - hlen if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen): @@ -663,7 +668,7 @@ if (not (storeflags & FLAG_CENSORED) and storageutil.deltaiscensored( - delta, baserev, lambda x: len(self.revision(x, raw=True)))): + delta, baserev, lambda x: len(self.rawdata(x)))): storeflags |= FLAG_CENSORED linkrev = linkmapper(linknode) @@ -716,7 +721,7 @@ # This restriction is cargo culted from revlogs and makes no sense for # SQLite, since columns can be resized at will. - if len(tombstone) > len(self.revision(censornode, raw=True)): + if len(tombstone) > len(self.rawdata(censornode)): raise error.Abort(_('censor tombstone must be no longer than ' 'censored data'))
--- a/hgext/transplant.py Sat Sep 07 14:35:21 2019 +0100 +++ b/hgext/transplant.py Mon Sep 09 17:26:17 2019 -0400 @@ -412,6 +412,17 @@ # this is kept only to reduce changes in a patch. pass + def stop(self, ui, repo): + """logic to stop an interrupted transplant""" + if self.canresume(): + startctx = repo['.'] + hg.updaterepo(repo, startctx.node(), overwrite=True) + ui.status(_("stopped the interrupted transplant\n")) + ui.status(_("working directory is now at %s\n") % + startctx.hex()[:12]) + self.unlog() + return 0 + def readseries(self): nodes = [] merges = [] @@ -559,6 +570,7 @@ _('parent to choose when transplanting merge'), _('REV')), ('e', 'edit', False, _('invoke editor on commit messages')), ('', 'log', None, _('append transplant info to log message')), + ('', 'stop', False, _('stop interrupted transplant')), ('c', 'continue', None, _('continue last transplant session ' 'after fixing conflicts')), ('', 'filter', '', @@ -646,6 +658,11 @@ raise error.Abort(_('--continue is incompatible with ' '--branch, --all and --merge')) return + if opts.get('stop'): + if opts.get('branch') or opts.get('all') or opts.get('merge'): + raise error.Abort(_('--stop is incompatible with ' + '--branch, --all and --merge')) + return if not (opts.get('source') or revs or opts.get('merge') or opts.get('branch')): raise error.Abort(_('no source URL, branch revision, or revision ' @@ -675,6 +692,10 @@ if opts.get('continue'): if not tp.canresume(): raise error.Abort(_('no transplant to continue')) + elif opts.get('stop'): + if not tp.canresume(): + raise error.Abort(_('no interrupted transplant found')) + return tp.stop(ui, repo) else: cmdutil.checkunfinished(repo) cmdutil.bailifchanged(repo) @@ -734,6 +755,13 @@ if cleanupfn: cleanupfn() +def continuecmd(ui, repo): + """logic to resume an interrupted transplant using + 'hg continue'""" + with repo.wlock(): + tp = transplanter(ui, repo, {}) + return tp.resume(repo, repo, {}) + revsetpredicate = registrar.revsetpredicate() @revsetpredicate('transplanted([set])') @@ -760,9 +788,10 @@ def extsetup(ui): statemod.addunfinished ( 'transplant', fname='transplant/journal', clearable=True, + continuefunc=continuecmd, statushint=_('To continue: hg transplant --continue\n' - 'To abort: hg update'), - cmdhint=_("use 'hg transplant --continue' or 'hg update' to abort") + 'To stop: hg transplant --stop'), + cmdhint=_("use 'hg transplant --continue' or 'hg transplant --stop'") ) # tell hggettext to extract docstrings from these functions:
--- a/hgext/uncommit.py Sat Sep 07 14:35:21 2019 +0100 +++ b/hgext/uncommit.py Mon Sep 09 17:26:17 2019 -0400 @@ -55,7 +55,8 @@ # leave the attribute unspecified. testedwith = 'ships-with-hg-core' -def _commitfiltered(repo, ctx, match, keepcommit): +def _commitfiltered(repo, ctx, match, keepcommit, message=None, user=None, + date=None): """Recommit ctx with changed files not in match. Return the new node identifier, or None if nothing changed. """ @@ -90,13 +91,20 @@ if not files: repo.ui.status(_("note: keeping empty commit\n")) + if message is None: + message = ctx.description() + if not user: + user = ctx.user() + if not date: + date = ctx.date() + new = context.memctx(repo, parents=[base.node(), node.nullid], - text=ctx.description(), + text=message, files=files, filectxfn=filectxfn, - user=ctx.user(), - date=ctx.date(), + user=user, + date=date, extra=ctx.extra()) return repo.commitctx(new) @@ -104,7 +112,8 @@ [('', 'keep', None, _('allow an empty commit after uncommiting')), ('', 'allow-dirty-working-copy', False, _('allow uncommit with outstanding changes')) - ] + commands.walkopts, + ] + commands.walkopts + commands.commitopts + commands.commitopts2 + + commands.commitopts3, _('[OPTION]... [FILE]...'), helpcategory=command.CATEGORY_CHANGE_MANAGEMENT) def uncommit(ui, repo, *pats, **opts): @@ -120,6 +129,8 @@ """ opts = pycompat.byteskwargs(opts) + cmdutil.resolvecommitoptions(ui, opts) + with repo.wlock(), repo.lock(): m, a, r, d = repo.status()[:4] @@ -162,13 +173,19 @@ % scmutil.getuipathfn(repo)(f), hint=hint) with repo.transaction('uncommit'): + if not (opts[b'message'] or opts[b'logfile']): + opts[b'message'] = old.description() + message = cmdutil.logmessage(ui, pycompat.byteskwargs(opts)) + keepcommit = pats if not keepcommit: if opts.get('keep') is not None: keepcommit = opts.get('keep') else: keepcommit = ui.configbool('experimental', 'uncommit.keep') - newid = _commitfiltered(repo, old, match, keepcommit) + newid = _commitfiltered(repo, old, match, keepcommit, + message=message, user=opts.get(b'user'), + date=opts.get(b'date')) if newid is None: ui.status(_("nothing to uncommit\n")) return 1
--- a/mercurial/bookmarks.py Sat Sep 07 14:35:21 2019 +0100 +++ b/mercurial/bookmarks.py Mon Sep 09 17:26:17 2019 -0400 @@ -480,8 +480,7 @@ Each elements of lists in result tuple is tuple "(bookmark name, changeset ID on source side, changeset ID on destination - side)". Each changeset IDs are 40 hexadecimal digit string or - None. + side)". Each changeset ID is a binary node or None. Changeset IDs of tuples in "addsrc", "adddst", "differ" or "invalid" list may be unknown for repo.
--- a/mercurial/bundle2.py Sat Sep 07 14:35:21 2019 +0100 +++ b/mercurial/bundle2.py Mon Sep 09 17:26:17 2019 -0400 @@ -2193,8 +2193,6 @@ return new = op.repo.obsstore.mergemarkers(tr, markerdata) op.repo.invalidatevolatilesets() - if new: - op.repo.ui.status(_('%i new obsolescence markers\n') % new) op.records.add('obsmarkers', {'new': new}) if op.reply is not None: rpart = op.reply.newpart('reply:obsmarkers')
--- a/mercurial/bundlerepo.py Sat Sep 07 14:35:21 2019 +0100 +++ b/mercurial/bundlerepo.py Mon Sep 09 17:26:17 2019 -0400 @@ -17,7 +17,10 @@ import shutil from .i18n import _ -from .node import nullid +from .node import ( + nullid, + nullrev +) from . import ( bundle2, @@ -105,23 +108,13 @@ elif rev1 <= self.repotiprev and rev2 <= self.repotiprev: return revlog.revlog.revdiff(self, rev1, rev2) - return mdiff.textdiff(self.revision(rev1, raw=True), - self.revision(rev2, raw=True)) + return mdiff.textdiff(self.rawdata(rev1), + self.rawdata(rev2)) - def revision(self, nodeorrev, _df=None, raw=False): - """return an uncompressed revision of a given node or revision - number. - """ - if isinstance(nodeorrev, int): - rev = nodeorrev - node = self.node(rev) - else: - node = nodeorrev + def _rawtext(self, node, rev, _df=None): + if rev is None: rev = self.rev(node) - - if node == nullid: - return "" - + validated = False rawtext = None chain = [] iterrev = rev @@ -132,25 +125,19 @@ break chain.append(iterrev) iterrev = self.index[iterrev][3] - if rawtext is None: - rawtext = self.baserevision(iterrev) - + if iterrev == nullrev: + rawtext = '' + elif rawtext is None: + r = super(bundlerevlog, self)._rawtext(self.node(iterrev), + iterrev, + _df=_df) + __, rawtext, validated = r + if chain: + validated = False while chain: delta = self._chunk(chain.pop()) rawtext = mdiff.patches(rawtext, [delta]) - - text, validatehash = self._processflags(rawtext, self.flags(rev), - 'read', raw=raw) - if validatehash: - self.checkhash(text, node, rev=rev) - self._revisioncache = (node, rev, rawtext) - return text - - def baserevision(self, nodeorrev): - # Revlog subclasses may override 'revision' method to modify format of - # content retrieved from revlog. To use bundlerevlog with such class one - # needs to override 'baserevision' and make more specific call here. - return revlog.revlog.revision(self, nodeorrev, raw=True) + return rev, rawtext, validated def addrevision(self, *args, **kwargs): raise NotImplementedError @@ -171,20 +158,6 @@ bundlerevlog.__init__(self, opener, self.indexfile, cgunpacker, linkmapper) - def baserevision(self, nodeorrev): - # Although changelog doesn't override 'revision' method, some extensions - # may replace this class with another that does. Same story with - # manifest and filelog classes. - - # This bypasses filtering on changelog.node() and rev() because we need - # revision text of the bundle base even if it is hidden. - oldfilter = self.filteredrevs - try: - self.filteredrevs = () - return changelog.changelog.revision(self, nodeorrev, raw=True) - finally: - self.filteredrevs = oldfilter - class bundlemanifest(bundlerevlog, manifest.manifestrevlog): def __init__(self, opener, cgunpacker, linkmapper, dirlogstarts=None, dir=''): @@ -198,17 +171,6 @@ self._dirlogstarts = dirlogstarts self._linkmapper = linkmapper - def baserevision(self, nodeorrev): - node = nodeorrev - if isinstance(node, int): - node = self.node(node) - - if node in self.fulltextcache: - result = '%s' % self.fulltextcache[node] - else: - result = manifest.manifestrevlog.revision(self, nodeorrev, raw=True) - return result - def dirlog(self, d): if d in self._dirlogstarts: self.bundle.seek(self._dirlogstarts[d]) @@ -223,9 +185,6 @@ self._revlog = bundlerevlog(opener, self.indexfile, cgunpacker, linkmapper) - def baserevision(self, nodeorrev): - return filelog.filelog.revision(self, nodeorrev, raw=True) - class bundlepeer(localrepo.localpeer): def canpush(self): return False
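The refactored ``_rawtext()`` walks the in-bundle delta chain down to the first revision that exists outside the bundle (or to ``nullrev``), takes that as the base text, then patches the bundle deltas back on top. A simplified sketch; ``deltabase``, ``basetext``, and ``getdelta`` are hypothetical accessors standing in for index lookups, the wrapped revlog, and bundle chunk reads, and the caching and hash-validation bookkeeping is omitted::

    from mercurial import mdiff
    from mercurial.node import nullrev

    def rawtextfromchain(rev, repotiprev, deltabase, basetext, getdelta):
        chain = []
        iterrev = rev
        while iterrev > repotiprev:      # revision exists only in the bundle
            chain.append(iterrev)
            iterrev = deltabase(iterrev)
        rawtext = '' if iterrev == nullrev else basetext(iterrev)
        while chain:                     # apply bundle deltas, oldest first
            rawtext = mdiff.patches(rawtext, [getdelta(chain.pop())])
        return rawtext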
--- a/mercurial/changegroup.py	Sat Sep 07 14:35:21 2019 +0100
+++ b/mercurial/changegroup.py	Mon Sep 09 17:26:17 2019 -0400
@@ -25,8 +25,11 @@
     mdiff,
     phases,
     pycompat,
+    util,
+)
+
+from .interfaces import (
     repository,
-    util,
 )
 
 _CHANGEGROUPV1_DELTA_HEADER = struct.Struct("20s20s20s20s")
@@ -267,7 +270,7 @@
         def revmap(x):
             return cl.rev(x)
 
-        changesets = files = revisions = 0
+        changesets = 0
 
         try:
             # The transaction may already carry source information. In this
@@ -334,23 +337,38 @@
             repo.ui.status(_("adding file changes\n"))
             newrevs, newfiles = _addchangegroupfiles(
                 repo, self, revmap, trp, efiles, needfiles)
-            revisions += newrevs
-            files += newfiles
+
+            # making sure the value exists
+            tr.changes.setdefault('changegroup-count-changesets', 0)
+            tr.changes.setdefault('changegroup-count-revisions', 0)
+            tr.changes.setdefault('changegroup-count-files', 0)
+            tr.changes.setdefault('changegroup-count-heads', 0)
+
+            # Some callers use bundle operations for internal purposes. They
+            # usually set `ui.quiet` to do this outside of user sight. Since
+            # the report of such operations now happens at the end of the
+            # transaction, ui.quiet has no direct effect on that output.
+            #
+            # To preserve this intent we use an inelegant hack: we fail to
+            # report the change if `quiet` is set. We should probably move to
+            # something better, but this is a good first step to allow the "end
+            # of transaction report" to pass tests.
+            if not repo.ui.quiet:
+                tr.changes['changegroup-count-changesets'] += changesets
+                tr.changes['changegroup-count-revisions'] += newrevs
+                tr.changes['changegroup-count-files'] += newfiles
 
             deltaheads = 0
             if oldheads:
                 heads = cl.heads()
-                deltaheads = len(heads) - len(oldheads)
+                deltaheads += len(heads) - len(oldheads)
                 for h in heads:
                     if h not in oldheads and repo[h].closesbranch():
                         deltaheads -= 1
-            htext = ""
-            if deltaheads:
-                htext = _(" (%+d heads)") % deltaheads
 
-            repo.ui.status(_("added %d changesets"
-                             " with %d changes to %d files%s\n")
-                           % (changesets, revisions, files, htext))
+            # see previous comment about checking ui.quiet
+            if not repo.ui.quiet:
+                tr.changes['changegroup-count-heads'] += deltaheads
             repo.invalidatevolatilesets()
 
         if changesets > 0:
--- a/mercurial/changelog.py Sat Sep 07 14:35:21 2019 +0100 +++ b/mercurial/changelog.py Mon Sep 09 17:26:17 2019 -0400 @@ -637,6 +637,9 @@ if extra is None and any(x is not None for x in extrasentries): extra = {} sortedfiles = sorted(files) + if extra is not None: + for name in ('p1copies', 'p2copies', 'filesadded', 'filesremoved'): + extra.pop(name, None) if p1copies is not None: extra['p1copies'] = encodecopies(sortedfiles, p1copies) if p2copies is not None:
--- a/mercurial/chgserver.py Sat Sep 07 14:35:21 2019 +0100 +++ b/mercurial/chgserver.py Mon Sep 09 17:26:17 2019 -0400 @@ -172,7 +172,7 @@ except OSError: # could be ENOENT, EPERM etc. not fatal in any case pass - return _hashlist(map(trystat, paths))[:12] + return _hashlist(pycompat.maplist(trystat, paths))[:12] class hashstate(object): """a structure storing confighash, mtimehash, paths used for mtimehash"""
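The one-line change above matters on Python 3, where ``map()`` returns a lazy iterator instead of a list, so the hashing helper downstream would no longer see the actual stat results. ``pycompat.maplist`` forces a real list on both major versions; it behaves roughly like this sketch::

    def maplist(*args):
        # eager equivalent of py2 map(); a plain list on both py2 and py3
        return list(map(*args))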
--- a/mercurial/cmdutil.py Sat Sep 07 14:35:21 2019 +0100 +++ b/mercurial/cmdutil.py Mon Sep 09 17:26:17 2019 -0400 @@ -100,6 +100,13 @@ _('record the specified user as committer'), _('USER')), ] +commitopts3 = [ + (b'D', b'current-date', None, + _(b'record the current date as commit date')), + (b'U', b'current-user', None, + _(b'record the current user as committer')), +] + formatteropts = [ ('T', 'template', '', _('display with template'), _('TEMPLATE')), @@ -175,17 +182,29 @@ # editor text _linebelow = "^HG: ------------------------ >8 ------------------------$" +def resolvecommitoptions(ui, opts): + """modify commit options dict to handle related options + """ + # N.B. this is extremely similar to setupheaderopts() in mq.py + if not opts.get(b'date') and opts.get(b'current_date'): + opts[b'date'] = b'%d %d' % dateutil.makedate() + if not opts.get(b'user') and opts.get(b'current_user'): + opts[b'user'] = ui.username() + def ishunk(x): hunkclasses = (crecordmod.uihunk, patch.recordhunk) return isinstance(x, hunkclasses) def newandmodified(chunks, originalchunks): newlyaddedandmodifiedfiles = set() + alsorestore = set() for chunk in chunks: if (ishunk(chunk) and chunk.header.isnewfile() and chunk not in originalchunks): newlyaddedandmodifiedfiles.add(chunk.header.filename()) - return newlyaddedandmodifiedfiles + alsorestore.update(set(chunk.header.files()) - + {chunk.header.filename()}) + return newlyaddedandmodifiedfiles, alsorestore def parsealiases(cmd): return cmd.split("|") @@ -326,8 +345,11 @@ # We need to keep a backup of files that have been newly added and # modified during the recording process because there is a previous - # version without the edit in the workdir - newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks) + # version without the edit in the workdir. We also will need to restore + # files that were the sources of renames so that the patch application + # works. + newlyaddedandmodifiedfiles, alsorestore = newandmodified(chunks, + originalchunks) contenders = set() for h in chunks: try: @@ -392,7 +414,7 @@ # 3a. apply filtered patch to clean repo (clean) if backups: # Equivalent to hg.revert - m = scmutil.matchfiles(repo, backups.keys()) + m = scmutil.matchfiles(repo, set(backups.keys()) | alsorestore) mergemod.update(repo, repo.dirstate.p1(), branchmerge=False, force=True, matcher=m) @@ -3172,7 +3194,13 @@ except error.PatchError as err: raise error.Abort(_('error parsing patch: %s') % err) - newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks) + # FIXME: when doing an interactive revert of a copy, there's no way of + # performing a partial revert of the added file, the only option is + # "remove added file <name> (Yn)?", so we don't need to worry about the + # alsorestore value. Ideally we'd be able to partially revert + # copied/renamed files. + newlyaddedandmodifiedfiles, unusedalsorestore = newandmodified( + chunks, originalchunks) if tobackup is None: tobackup = set() # Apply changes
--- a/mercurial/commands.py Sat Sep 07 14:35:21 2019 +0100 +++ b/mercurial/commands.py Mon Sep 09 17:26:17 2019 -0400 @@ -118,6 +118,7 @@ walkopts = cmdutil.walkopts commitopts = cmdutil.commitopts commitopts2 = cmdutil.commitopts2 +commitopts3 = cmdutil.commitopts3 formatteropts = cmdutil.formatteropts templateopts = cmdutil.templateopts logopts = cmdutil.logopts @@ -1872,6 +1873,7 @@ for section, name, value in ui.walkconfig(untrusted=untrusted): source = ui.configsource(section, name, untrusted) value = pycompat.bytestr(value) + defaultvalue = ui.configdefault(section, name) if fm.isplain(): source = source or 'none' value = value.replace('\n', '\\n') @@ -1885,6 +1887,7 @@ fm.write('value', '%s\n', value) else: fm.write('name value', '%s=%s\n', entryname, value) + fm.data(defaultvalue=defaultvalue) matched = True fm.end() if matched:
--- a/mercurial/configitems.py Sat Sep 07 14:35:21 2019 +0100 +++ b/mercurial/configitems.py Mon Sep 09 17:26:17 2019 -0400 @@ -39,13 +39,14 @@ """ def __init__(self, section, name, default=None, alias=(), - generic=False, priority=0): + generic=False, priority=0, experimental=False): self.section = section self.name = name self.default = default self.alias = list(alias) self.generic = generic self.priority = priority + self.experimental = experimental self._re = None if generic: self._re = re.compile(self.name) @@ -166,6 +167,7 @@ ) coreconfigitem('censor', 'policy', default='abort', + experimental=True, ) coreconfigitem('chgserver', 'idletimeout', default=3600, @@ -184,9 +186,11 @@ ) coreconfigitem('cmdserver', 'max-repo-cache', default=0, + experimental=True, ) coreconfigitem('cmdserver', 'message-encodings', default=list, + experimental=True, ) coreconfigitem('cmdserver', 'track-log', default=lambda: ['chgserver', 'cmdserver', 'repocache'], @@ -207,6 +211,7 @@ ) coreconfigitem('commands', 'grep.all-files', default=False, + experimental=True, ) coreconfigitem('commands', 'resolve.confirm', default=False, @@ -226,6 +231,7 @@ ) coreconfigitem('commands', 'status.skipstates', default=[], + experimental=True, ) coreconfigitem('commands', 'status.terse', default='', @@ -314,6 +320,7 @@ ) coreconfigitem('convert', 'ignoreancestorcheck', default=False, + experimental=True, ) coreconfigitem('convert', 'localtimezone', default=False, @@ -415,6 +422,9 @@ coreconfigitem('devel', 'debug.peer-request', default=False, ) +coreconfigitem('devel', 'discovery.randomize', + default=True, +) _registerdiffopts(section='diff') coreconfigitem('email', 'bcc', default=None, @@ -684,18 +694,22 @@ ) coreconfigitem('format', 'chunkcachesize', default=None, + experimental=True, ) coreconfigitem('format', 'dotencode', default=True, ) coreconfigitem('format', 'generaldelta', default=False, + experimental=True, ) coreconfigitem('format', 'manifestcachesize', default=None, + experimental=True, ) coreconfigitem('format', 'maxchainlen', default=dynamicdefault, + experimental=True, ) coreconfigitem('format', 'obsstore-version', default=None, @@ -718,6 +732,7 @@ ) coreconfigitem('format', 'internal-phase', default=False, + experimental=True, ) coreconfigitem('fsmonitor', 'warn_when_unused', default=True, @@ -823,6 +838,7 @@ ) coreconfigitem('merge', 'preferancestor', default=lambda: ['*'], + experimental=True, ) coreconfigitem('merge', 'strict-capability-check', default=False, @@ -1007,6 +1023,7 @@ ) coreconfigitem('storage', 'new-repo-backend', default='revlogv1', + experimental=True, ) coreconfigitem('storage', 'revlog.optimize-delta-parent-choice', default=True, @@ -1117,6 +1134,7 @@ ) coreconfigitem('sparse', 'missingwarning', default=True, + experimental=True, ) coreconfigitem('subrepos', 'allowed', default=dynamicdefault, # to make backporting simpler @@ -1463,6 +1481,7 @@ ) coreconfigitem('web', 'view', default='served', + experimental=True, ) coreconfigitem('worker', 'backgroundclose', default=dynamicdefault,
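With the new constructor argument, an option can be tagged as experimental at registration time, which documentation and tooling can then inspect. A hypothetical registration using the flag (the section and option names are made up)::

    from mercurial.configitems import coreconfigitem

    coreconfigitem('myext', 'frobnicate',
        default=False,
        experimental=True,   # hypothetical experimental option
    )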
--- a/mercurial/context.py Sat Sep 07 14:35:21 2019 +0100 +++ b/mercurial/context.py Mon Sep 09 17:26:17 2019 -0400 @@ -24,6 +24,7 @@ wdirhex, ) from . import ( + copies, dagop, encoding, error, @@ -274,23 +275,7 @@ @propertycache def _copies(self): - p1copies = {} - p2copies = {} - p1 = self.p1() - p2 = self.p2() - narrowmatch = self._repo.narrowmatch() - for dst in self.files(): - if not narrowmatch(dst) or dst not in self: - continue - copied = self[dst].renamed() - if not copied: - continue - src, srcnode = copied - if src in p1 and p1[src].filenode() == srcnode: - p1copies[dst] = src - elif src in p2 and p2[src].filenode() == srcnode: - p2copies[dst] = src - return p1copies, p2copies + return copies.computechangesetcopies(self) def p1copies(self): return self._copies[0] def p2copies(self): @@ -474,24 +459,14 @@ (source == 'compatibility' and self._changeset.filesadded is not None)): return self._changeset.filesadded or [] - - added = [] - for f in self.files(): - if not any(f in p for p in self.parents()): - added.append(f) - return added + return scmutil.computechangesetfilesadded(self) def filesremoved(self): source = self._repo.ui.config('experimental', 'copies.read-from') if (source == 'changeset-only' or (source == 'compatibility' and self._changeset.filesremoved is not None)): return self._changeset.filesremoved or [] - - removed = [] - for f in self.files(): - if f not in self: - removed.append(f) - return removed + return scmutil.computechangesetfilesremoved(self) @propertycache def _copies(self): @@ -1078,7 +1053,7 @@ filelog=self._filelog, changeid=changeid) def rawdata(self): - return self._filelog.revision(self._filenode, raw=True) + return self._filelog.rawdata(self._filenode) def rawflags(self): """low-level revlog flags""" @@ -1583,9 +1558,10 @@ parents = self._repo.dirstate.parents() p1manifest = self._repo[parents[0]].manifest() p2manifest = self._repo[parents[1]].manifest() + changedset = set(self.added()) | set(self.modified()) narrowmatch = self._repo.narrowmatch() for dst, src in self._repo.dirstate.copies().items(): - if not narrowmatch(dst): + if dst not in changedset or not narrowmatch(dst): continue if src in p1manifest: p1copies[dst] = src
--- a/mercurial/copies.py	Sat Sep 07 14:35:21 2019 +0100
+++ b/mercurial/copies.py	Mon Sep 09 17:26:17 2019 -0400
@@ -809,3 +809,28 @@
             continue
         if dst in wctx:
             wctx[dst].markcopied(src)
+
+def computechangesetcopies(ctx):
+    """return the copies data for a changeset
+
+    The copies data are returned as a pair of dictionaries (p1copies, p2copies).
+
+    Each dictionary is of the form: `{newname: oldname}`
+    """
+    p1copies = {}
+    p2copies = {}
+    p1 = ctx.p1()
+    p2 = ctx.p2()
+    narrowmatch = ctx._repo.narrowmatch()
+    for dst in ctx.files():
+        if not narrowmatch(dst) or dst not in ctx:
+            continue
+        copied = ctx[dst].renamed()
+        if not copied:
+            continue
+        src, srcnode = copied
+        if src in p1 and p1[src].filenode() == srcnode:
+            p1copies[dst] = src
+        elif src in p2 and p2[src].filenode() == srcnode:
+            p2copies[dst] = src
+    return p1copies, p2copies
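Now that the computation lives in ``mercurial.copies``, other consumers can call it directly instead of going through ``changectx._copies``. A usage sketch, assuming ``repo`` is a loaded local repository::

    from mercurial import copies

    ctx = repo['.']
    p1copies, p2copies = copies.computechangesetcopies(ctx)
    for dst, src in sorted(p1copies.items()):
        repo.ui.write('%s copied from %s (against p1)\n' % (dst, src))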
--- a/mercurial/debugcommands.py	Sat Sep 07 14:35:21 2019 +0100
+++ b/mercurial/debugcommands.py	Mon Sep 09 17:26:17 2019 -0400
@@ -562,7 +562,7 @@
         raise error.CommandError('debugdata', _('invalid arguments'))
     r = cmdutil.openstorage(repo, 'debugdata', file_, opts)
     try:
-        ui.write(r.revision(r.lookup(rev), raw=True))
+        ui.write(r.rawdata(r.lookup(rev)))
     except KeyError:
         raise error.Abort(_('invalid revision identifier %s') % rev)
@@ -1383,6 +1383,11 @@
     fm.condwrite(err, 'usernameerror', _("checking username...\n %s\n"
         " (specify a username in your configuration file)\n"), err)
 
+    for name, mod in extensions.extensions():
+        handler = getattr(mod, 'debuginstall', None)
+        if handler is not None:
+            problems += handler(ui, fm)
+
     fm.condwrite(not problems, '',
                  _("no problems detected\n"))
     if not problems:
@@ -2843,8 +2848,10 @@
           ('o', 'optimize', [], _('extra optimization to perform'), _('NAME')),
           ('', 'run', False, _('performs an upgrade')),
           ('', 'backup', True, _('keep the old repository content around')),
+          ('', 'changelog', None, _('select the changelog for upgrade')),
+          ('', 'manifest', None, _('select the manifest for upgrade')),
          ])
-def debugupgraderepo(ui, repo, run=False, optimize=None, backup=True):
+def debugupgraderepo(ui, repo, run=False, optimize=None, backup=True, **opts):
     """upgrade a repository to use different features
 
     If no arguments are specified, the repository is evaluated for upgrade
@@ -2862,9 +2869,17 @@
     rename some directories inside the ``.hg`` directory. On most machines,
     this should complete almost instantaneously and the chances of a consumer
     being unable to access the repository should be low.
+
+    By default, all revlogs will be upgraded. You can restrict this using
+    flags such as `--manifest`:
+
+    * `--manifest`: only optimize the manifest
+    * `--no-manifest`: optimize all revlogs but the manifest
+    * `--changelog`: optimize the changelog only
+    * `--no-changelog --no-manifest`: optimize filelogs only
     """
     return upgrade.upgraderepo(ui, repo, run=run, optimize=optimize,
-                               backup=backup)
+                               backup=backup, **opts)
 
 @command('debugwalk', cmdutil.walkopts, _('[OPTION]... [FILE]...'),
          inferrepo=True)
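``hg debuginstall`` now also polls every loaded extension for a module-level ``debuginstall(ui, fm)`` function and adds its return value to the problem count; that is how the fsmonitor watchman check earlier in this changeset gets invoked. A minimal handler an extension could ship (the extension name is hypothetical)::

    from mercurial.i18n import _

    def debuginstall(ui, fm):
        # write one formatter field, then report how many problems were found
        fm.write('myext-check', _('checking myext... (%s)\n'), 'ok')
        return 0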
--- a/mercurial/dirstate.py Sat Sep 07 14:35:21 2019 +0100 +++ b/mercurial/dirstate.py Mon Sep 09 17:26:17 2019 -0400 @@ -28,7 +28,7 @@ ) parsers = policy.importmod(r'parsers') -dirstatemod = policy.importrust(r'dirstate', default=parsers) +rustmod = policy.importrust(r'dirstate') propertycache = util.propertycache filecache = scmutil.filecache @@ -652,7 +652,8 @@ delaywrite = self._ui.configint('debug', 'dirstate.delaywrite') if delaywrite > 0: # do we have any files to delay for? - for f, e in self._map.iteritems(): + items = self._map.iteritems() + for f, e in items: if e[0] == 'n' and e[3] == now: import time # to avoid useless import # rather than sleep n seconds, sleep until the next @@ -663,6 +664,12 @@ time.sleep(end - clock) now = end # trust our estimate that the end is near now break + # since the iterator is potentially not deleted, + # delete the iterator to release the reference for the Rust + # implementation. + # TODO make the Rust implementation behave like Python + # since this would not work with a non ref-counting GC. + del items self._map.write(st, now) self._lastnormaltime = 0 @@ -1475,7 +1482,7 @@ # parsing the dirstate. # # (we cannot decorate the function directly since it is in a C module) - parse_dirstate = util.nogc(dirstatemod.parse_dirstate) + parse_dirstate = util.nogc(parsers.parse_dirstate) p = parse_dirstate(self._map, self.copymap, st) if not self._dirtyparents: self.setparents(*p) @@ -1486,8 +1493,8 @@ self.get = self._map.get def write(self, st, now): - st.write(dirstatemod.pack_dirstate(self._map, self.copymap, - self.parents(), now)) + st.write(parsers.pack_dirstate(self._map, self.copymap, + self.parents(), now)) st.close() self._dirtyparents = False self.nonnormalset, self.otherparentset = self.nonnormalentries() @@ -1516,3 +1523,186 @@ for name in self._dirs: f[normcase(name)] = name return f + + +if rustmod is not None: + class dirstatemap(object): + def __init__(self, ui, opener, root): + self._ui = ui + self._opener = opener + self._root = root + self._filename = 'dirstate' + self._parents = None + self._dirtyparents = False + + # for consistent view between _pl() and _read() invocations + self._pendingmode = None + + def addfile(self, *args, **kwargs): + return self._rustmap.addfile(*args, **kwargs) + + def removefile(self, *args, **kwargs): + return self._rustmap.removefile(*args, **kwargs) + + def dropfile(self, *args, **kwargs): + return self._rustmap.dropfile(*args, **kwargs) + + def clearambiguoustimes(self, *args, **kwargs): + return self._rustmap.clearambiguoustimes(*args, **kwargs) + + def nonnormalentries(self): + return self._rustmap.nonnormalentries() + + def get(self, *args, **kwargs): + return self._rustmap.get(*args, **kwargs) + + @propertycache + def _rustmap(self): + self._rustmap = rustmod.DirstateMap(self._root) + self.read() + return self._rustmap + + @property + def copymap(self): + return self._rustmap.copymap() + + def preload(self): + self._rustmap + + def clear(self): + self._rustmap.clear() + self.setparents(nullid, nullid) + util.clearcachedproperty(self, "_dirs") + util.clearcachedproperty(self, "_alldirs") + util.clearcachedproperty(self, "dirfoldmap") + + def items(self): + return self._rustmap.items() + + def keys(self): + return iter(self._rustmap) + + def __contains__(self, key): + return key in self._rustmap + + def __getitem__(self, item): + return self._rustmap[item] + + def __len__(self): + return len(self._rustmap) + + def __iter__(self): + return iter(self._rustmap) + + # forward for python2,3 compat + 
iteritems = items + + def _opendirstatefile(self): + fp, mode = txnutil.trypending(self._root, self._opener, + self._filename) + if self._pendingmode is not None and self._pendingmode != mode: + fp.close() + raise error.Abort(_('working directory state may be ' + 'changed parallelly')) + self._pendingmode = mode + return fp + + def setparents(self, p1, p2): + self._rustmap.setparents(p1, p2) + self._parents = (p1, p2) + self._dirtyparents = True + + def parents(self): + if not self._parents: + try: + fp = self._opendirstatefile() + st = fp.read(40) + fp.close() + except IOError as err: + if err.errno != errno.ENOENT: + raise + # File doesn't exist, so the current state is empty + st = '' + + try: + self._parents = self._rustmap.parents(st) + except ValueError: + raise error.Abort(_('working directory state appears ' + 'damaged!')) + + return self._parents + + def read(self): + # ignore HG_PENDING because identity is used only for writing + self.identity = util.filestat.frompath( + self._opener.join(self._filename)) + + try: + fp = self._opendirstatefile() + try: + st = fp.read() + finally: + fp.close() + except IOError as err: + if err.errno != errno.ENOENT: + raise + return + if not st: + return + + parse_dirstate = util.nogc(self._rustmap.read) + parents = parse_dirstate(st) + if parents and not self._dirtyparents: + self.setparents(*parents) + + def write(self, st, now): + parents = self.parents() + st.write(self._rustmap.write(parents[0], parents[1], now)) + st.close() + self._dirtyparents = False + + @propertycache + def filefoldmap(self): + """Returns a dictionary mapping normalized case paths to their + non-normalized versions. + """ + return self._rustmap.filefoldmapasdict() + + def hastrackeddir(self, d): + self._dirs # Trigger Python's propertycache + return self._rustmap.hastrackeddir(d) + + def hasdir(self, d): + self._dirs # Trigger Python's propertycache + return self._rustmap.hasdir(d) + + @propertycache + def _dirs(self): + return self._rustmap.getdirs() + + @propertycache + def _alldirs(self): + return self._rustmap.getalldirs() + + @propertycache + def identity(self): + self._rustmap + return self.identity + + @property + def nonnormalset(self): + nonnorm, otherparents = self._rustmap.nonnormalentries() + return nonnorm + + @property + def otherparentset(self): + nonnorm, otherparents = self._rustmap.nonnormalentries() + return otherparents + + @propertycache + def dirfoldmap(self): + f = {} + normcase = util.normcase + for name in self._dirs: + f[normcase(name)] = name + return f
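The Rust-backed ``dirstatemap`` wrapper leans on ``util.propertycache`` for lazy one-shot initialization: note how the ``_rustmap`` property assigns ``self._rustmap`` before calling ``read()``, so the reentrant attribute access inside ``read()`` already finds the constructed map. The descriptor behaves roughly like this simplified sketch::

    class propertycache(object):
        def __init__(self, func):
            self.func = func
            self.name = func.__name__
        def __get__(self, obj, objtype=None):
            value = self.func(obj)
            # the instance attribute now shadows the descriptor, so the
            # function runs at most once per instance
            obj.__dict__[self.name] = value
            return value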
--- a/mercurial/exchange.py Sat Sep 07 14:35:21 2019 +0100 +++ b/mercurial/exchange.py Mon Sep 09 17:26:17 2019 -0400 @@ -34,7 +34,6 @@ phases, pushkey, pycompat, - repository, scmutil, sslutil, streamclone, @@ -42,6 +41,9 @@ util, wireprototypes, ) +from .interfaces import ( + repository, +) from .utils import ( stringutil, ) @@ -728,9 +730,9 @@ return _processcompared(pushop, ancestors, explicit, remotebookmark, comp) def _processcompared(pushop, pushed, explicit, remotebms, comp): - """take decision on bookmark to pull from the remote bookmark + """take decision on bookmarks to push to the remote repo - Exist to help extensions who want to alter this behavior. + Exists to help extensions alter this behavior. """ addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
--- a/mercurial/exchangev2.py Sat Sep 07 14:35:21 2019 +0100 +++ b/mercurial/exchangev2.py Mon Sep 09 17:26:17 2019 -0400 @@ -22,8 +22,10 @@ narrowspec, phases, pycompat, + setdiscovery, +) +from .interfaces import ( repository, - setdiscovery, ) def pull(pullop):
--- a/mercurial/filelog.py Sat Sep 07 14:35:21 2019 +0100 +++ b/mercurial/filelog.py Mon Sep 09 17:26:17 2019 -0400 @@ -14,11 +14,13 @@ ) from . import ( error, - repository, revlog, ) +from .interfaces import ( + repository, + util as interfaceutil, +) from .utils import ( - interfaceutil, storageutil, ) @@ -90,6 +92,9 @@ def revision(self, node, _df=None, raw=False): return self._revlog.revision(node, _df=_df, raw=raw) + def rawdata(self, node, _df=None): + return self._revlog.rawdata(node, _df=_df) + def emitrevisions(self, nodes, nodesorder=None, revisiondata=False, assumehaveparentrevisions=False, deltamode=repository.CG_DELTAMODE_STD):
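After this series, callers that want the stored bytes (for example an LFS pointer payload) should prefer the explicit ``rawdata()`` accessor over ``revision(..., raw=True)``. A hedged usage sketch, assuming ``repo`` is a loaded repository that tracks a file ``foo``::

    fl = repo.file(b'foo')
    node = fl.node(0)
    text = fl.revision(node)  # fulltext after flag processors have run
    raw = fl.rawdata(node)    # stored bytes; flags (e.g. LFS) not applied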
--- a/mercurial/hg.py Sat Sep 07 14:35:21 2019 +0100 +++ b/mercurial/hg.py Mon Sep 09 17:26:17 2019 -0400 @@ -39,7 +39,6 @@ node, phases, pycompat, - repository as repositorymod, scmutil, sshpeer, statichttprepo, @@ -51,6 +50,10 @@ vfs as vfsmod, ) +from .interfaces import ( + repository as repositorymod, +) + release = lock.release # shared features
--- a/mercurial/httppeer.py Sat Sep 07 14:35:21 2019 +0100 +++ b/mercurial/httppeer.py Mon Sep 09 17:26:17 2019 -0400 @@ -21,7 +21,6 @@ error, httpconnection, pycompat, - repository, statichttprepo, url as urlmod, util, @@ -31,9 +30,12 @@ wireprotov2peer, wireprotov2server, ) +from .interfaces import ( + repository, + util as interfaceutil, +) from .utils import ( cborutil, - interfaceutil, stringutil, ) @@ -488,27 +490,21 @@ os.unlink(tempname) def _calltwowaystream(self, cmd, fp, **args): - fh = None - fp_ = None filename = None try: # dump bundle to disk fd, filename = pycompat.mkstemp(prefix="hg-bundle-", suffix=".hg") - fh = os.fdopen(fd, r"wb") - d = fp.read(4096) - while d: - fh.write(d) + with os.fdopen(fd, r"wb") as fh: d = fp.read(4096) - fh.close() + while d: + fh.write(d) + d = fp.read(4096) # start http push - fp_ = httpconnection.httpsendfile(self.ui, filename, "rb") - headers = {r'Content-Type': r'application/mercurial-0.1'} - return self._callstream(cmd, data=fp_, headers=headers, **args) + with httpconnection.httpsendfile(self.ui, filename, "rb") as fp_: + headers = {r'Content-Type': r'application/mercurial-0.1'} + return self._callstream(cmd, data=fp_, headers=headers, **args) finally: - if fp_ is not None: - fp_.close() - if fh is not None: - fh.close() + if filename is not None: os.unlink(filename) def _callcompressable(self, cmd, **args):
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/mercurial/interfaces/repository.py Mon Sep 09 17:26:17 2019 -0400 @@ -0,0 +1,1877 @@ +# repository.py - Interfaces and base classes for repositories and peers. +# +# Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from __future__ import absolute_import + +from ..i18n import _ +from .. import ( + error, +) +from . import ( + util as interfaceutil, +) + +# When narrowing is finalized and no longer subject to format changes, +# we should move this to just "narrow" or similar. +NARROW_REQUIREMENT = 'narrowhg-experimental' + +# Local repository feature string. + +# Revlogs are being used for file storage. +REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage' +# The storage part of the repository is shared from an external source. +REPO_FEATURE_SHARED_STORAGE = b'sharedstore' +# LFS supported for backing file storage. +REPO_FEATURE_LFS = b'lfs' +# Repository supports being stream cloned. +REPO_FEATURE_STREAM_CLONE = b'streamclone' +# Files storage may lack data for all ancestors. +REPO_FEATURE_SHALLOW_FILE_STORAGE = b'shallowfilestorage' + +REVISION_FLAG_CENSORED = 1 << 15 +REVISION_FLAG_ELLIPSIS = 1 << 14 +REVISION_FLAG_EXTSTORED = 1 << 13 + +REVISION_FLAGS_KNOWN = ( + REVISION_FLAG_CENSORED | REVISION_FLAG_ELLIPSIS | REVISION_FLAG_EXTSTORED) + +CG_DELTAMODE_STD = b'default' +CG_DELTAMODE_PREV = b'previous' +CG_DELTAMODE_FULL = b'fulltext' +CG_DELTAMODE_P1 = b'p1' + +class ipeerconnection(interfaceutil.Interface): + """Represents a "connection" to a repository. + + This is the base interface for representing a connection to a repository. + It holds basic properties and methods applicable to all peer types. + + This is not a complete interface definition and should not be used + outside of this module. + """ + ui = interfaceutil.Attribute("""ui.ui instance""") + + def url(): + """Returns a URL string representing this peer. + + Currently, implementations expose the raw URL used to construct the + instance. It may contain credentials as part of the URL. The + expectations of the value aren't well-defined and this could lead to + data leakage. + + TODO audit/clean consumers and more clearly define the contents of this + value. + """ + + def local(): + """Returns a local repository instance. + + If the peer represents a local repository, returns an object that + can be used to interface with it. Otherwise returns ``None``. + """ + + def peer(): + """Returns an object conforming to this interface. + + Most implementations will ``return self``. + """ + + def canpush(): + """Returns a boolean indicating if this peer can be pushed to.""" + + def close(): + """Close the connection to this peer. + + This is called when the peer will no longer be used. Resources + associated with the peer should be cleaned up. + """ + +class ipeercapabilities(interfaceutil.Interface): + """Peer sub-interface related to capabilities.""" + + def capable(name): + """Determine support for a named capability. + + Returns ``False`` if capability not supported. + + Returns ``True`` if boolean capability is supported. Returns a string + if capability support is non-boolean. + + Capability strings may or may not map to wire protocol capabilities. + """ + + def requirecap(name, purpose): + """Require a capability to be present. + + Raises a ``CapabilityError`` if the capability isn't present. 
+ """ + +class ipeercommands(interfaceutil.Interface): + """Client-side interface for communicating over the wire protocol. + + This interface is used as a gateway to the Mercurial wire protocol. + methods commonly call wire protocol commands of the same name. + """ + + def branchmap(): + """Obtain heads in named branches. + + Returns a dict mapping branch name to an iterable of nodes that are + heads on that branch. + """ + + def capabilities(): + """Obtain capabilities of the peer. + + Returns a set of string capabilities. + """ + + def clonebundles(): + """Obtains the clone bundles manifest for the repo. + + Returns the manifest as unparsed bytes. + """ + + def debugwireargs(one, two, three=None, four=None, five=None): + """Used to facilitate debugging of arguments passed over the wire.""" + + def getbundle(source, **kwargs): + """Obtain remote repository data as a bundle. + + This command is how the bulk of repository data is transferred from + the peer to the local repository + + Returns a generator of bundle data. + """ + + def heads(): + """Determine all known head revisions in the peer. + + Returns an iterable of binary nodes. + """ + + def known(nodes): + """Determine whether multiple nodes are known. + + Accepts an iterable of nodes whose presence to check for. + + Returns an iterable of booleans indicating of the corresponding node + at that index is known to the peer. + """ + + def listkeys(namespace): + """Obtain all keys in a pushkey namespace. + + Returns an iterable of key names. + """ + + def lookup(key): + """Resolve a value to a known revision. + + Returns a binary node of the resolved revision on success. + """ + + def pushkey(namespace, key, old, new): + """Set a value using the ``pushkey`` protocol. + + Arguments correspond to the pushkey namespace and key to operate on and + the old and new values for that key. + + Returns a string with the peer result. The value inside varies by the + namespace. + """ + + def stream_out(): + """Obtain streaming clone data. + + Successful result should be a generator of data chunks. + """ + + def unbundle(bundle, heads, url): + """Transfer repository data to the peer. + + This is how the bulk of data during a push is transferred. + + Returns the integer number of heads added to the peer. + """ + +class ipeerlegacycommands(interfaceutil.Interface): + """Interface for implementing support for legacy wire protocol commands. + + Wire protocol commands transition to legacy status when they are no longer + used by modern clients. To facilitate identifying which commands are + legacy, the interfaces are split. + """ + + def between(pairs): + """Obtain nodes between pairs of nodes. + + ``pairs`` is an iterable of node pairs. + + Returns an iterable of iterables of nodes corresponding to each + requested pair. + """ + + def branches(nodes): + """Obtain ancestor changesets of specific nodes back to a branch point. + + For each requested node, the peer finds the first ancestor node that is + a DAG root or is a merge. + + Returns an iterable of iterables with the resolved values for each node. + """ + + def changegroup(nodes, source): + """Obtain a changegroup with data for descendants of specified nodes.""" + + def changegroupsubset(bases, heads, source): + pass + +class ipeercommandexecutor(interfaceutil.Interface): + """Represents a mechanism to execute remote commands. + + This is the primary interface for requesting that wire protocol commands + be executed. 
Instances of this interface are active in a context manager + and have a well-defined lifetime. When the context manager exits, all + outstanding requests are waited on. + """ + + def callcommand(name, args): + """Request that a named command be executed. + + Receives the command name and a dictionary of command arguments. + + Returns a ``concurrent.futures.Future`` that will resolve to the + result of that command request. That exact value is left up to + the implementation and possibly varies by command. + + Not all commands can coexist with other commands in an executor + instance: it depends on the underlying wire protocol transport being + used and the command itself. + + Implementations MAY call ``sendcommands()`` automatically if the + requested command can not coexist with other commands in this executor. + + Implementations MAY call ``sendcommands()`` automatically when the + future's ``result()`` is called. So, consumers using multiple + commands with an executor MUST ensure that ``result()`` is not called + until all command requests have been issued. + """ + + def sendcommands(): + """Trigger submission of queued command requests. + + Not all transports submit commands as soon as they are requested to + run. When called, this method forces queued command requests to be + issued. It will no-op if all commands have already been sent. + + When called, no more new commands may be issued with this executor. + """ + + def close(): + """Signal that this command request is finished. + + When called, no more new commands may be issued. All outstanding + commands that have previously been issued are waited on before + returning. This not only includes waiting for the futures to resolve, + but also waiting for all response data to arrive. In other words, + calling this waits for all on-wire state for issued command requests + to finish. + + When used as a context manager, this method is called when exiting the + context manager. + + This method may call ``sendcommands()`` if there are buffered commands. + """ + +class ipeerrequests(interfaceutil.Interface): + """Interface for executing commands on a peer.""" + + limitedarguments = interfaceutil.Attribute( + """True if the peer cannot receive large argument value for commands.""" + ) + + def commandexecutor(): + """A context manager that resolves to an ipeercommandexecutor. + + The object this resolves to can be used to issue command requests + to the peer. + + Callers should call its ``callcommand`` method to issue command + requests. + + A new executor should be obtained for each distinct set of commands + (possibly just a single command) that the consumer wants to execute + as part of a single operation or round trip. This is because some + peers are half-duplex and/or don't support persistent connections. + e.g. in the case of HTTP peers, commands sent to an executor represent + a single HTTP request. While some peers may support multiple command + sends over the wire per executor, consumers need to code to the least + capable peer. So it should be assumed that command executors buffer + called commands until they are told to send them and that each + command executor could result in a new connection or wire-level request + being issued. + """ + +class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests): + """Unified interface for peer repositories. + + All peer instances must conform to this interface. 
+ """ + +class ipeerv2(ipeerconnection, ipeercapabilities, ipeerrequests): + """Unified peer interface for wire protocol version 2 peers.""" + + apidescriptor = interfaceutil.Attribute( + """Data structure holding description of server API.""") + +@interfaceutil.implementer(ipeerbase) +class peer(object): + """Base class for peer repositories.""" + + limitedarguments = False + + def capable(self, name): + caps = self.capabilities() + if name in caps: + return True + + name = '%s=' % name + for cap in caps: + if cap.startswith(name): + return cap[len(name):] + + return False + + def requirecap(self, name, purpose): + if self.capable(name): + return + + raise error.CapabilityError( + _('cannot %s; remote repository does not support the ' + '\'%s\' capability') % (purpose, name)) + +class iverifyproblem(interfaceutil.Interface): + """Represents a problem with the integrity of the repository. + + Instances of this interface are emitted to describe an integrity issue + with a repository (e.g. corrupt storage, missing data, etc). + + Instances are essentially messages associated with severity. + """ + warning = interfaceutil.Attribute( + """Message indicating a non-fatal problem.""") + + error = interfaceutil.Attribute( + """Message indicating a fatal problem.""") + + node = interfaceutil.Attribute( + """Revision encountering the problem. + + ``None`` means the problem doesn't apply to a single revision. + """) + +class irevisiondelta(interfaceutil.Interface): + """Represents a delta between one revision and another. + + Instances convey enough information to allow a revision to be exchanged + with another repository. + + Instances represent the fulltext revision data or a delta against + another revision. Therefore the ``revision`` and ``delta`` attributes + are mutually exclusive. + + Typically used for changegroup generation. + """ + + node = interfaceutil.Attribute( + """20 byte node of this revision.""") + + p1node = interfaceutil.Attribute( + """20 byte node of 1st parent of this revision.""") + + p2node = interfaceutil.Attribute( + """20 byte node of 2nd parent of this revision.""") + + linknode = interfaceutil.Attribute( + """20 byte node of the changelog revision this node is linked to.""") + + flags = interfaceutil.Attribute( + """2 bytes of integer flags that apply to this revision. + + This is a bitwise composition of the ``REVISION_FLAG_*`` constants. + """) + + basenode = interfaceutil.Attribute( + """20 byte node of the revision this data is a delta against. + + ``nullid`` indicates that the revision is a full revision and not + a delta. + """) + + baserevisionsize = interfaceutil.Attribute( + """Size of base revision this delta is against. + + May be ``None`` if ``basenode`` is ``nullid``. + """) + + revision = interfaceutil.Attribute( + """Raw fulltext of revision data for this node.""") + + delta = interfaceutil.Attribute( + """Delta between ``basenode`` and ``node``. + + Stored in the bdiff delta format. + """) + +class ifilerevisionssequence(interfaceutil.Interface): + """Contains index data for all revisions of a file. + + Types implementing this behave like lists of tuples. The index + in the list corresponds to the revision number. The values contain + index metadata. + + The *null* revision (revision number -1) is always the last item + in the index. + """ + + def __len__(): + """The total number of revisions.""" + + def __getitem__(rev): + """Returns the object having a specific revision number. 
+
+        Returns an 8-tuple with the following fields:
+
+           offset+flags
+              Contains the offset and flags for the revision. 64-bit unsigned
+              integer where first 6 bytes are the offset and the next 2 bytes
+              are flags. The offset can be 0 if it is not used by the store.
+           compressed size
+              Size of the revision data in the store. It can be 0 if it isn't
+              needed by the store.
+           uncompressed size
+              Fulltext size. It can be 0 if it isn't needed by the store.
+           base revision
+              Revision number of revision the delta for storage is encoded
+              against. -1 indicates not encoded against a base revision.
+           link revision
+              Revision number of changelog revision this entry is related to.
+           p1 revision
+              Revision number of 1st parent. -1 if no 1st parent.
+           p2 revision
+              Revision number of 2nd parent. -1 if no 2nd parent.
+           node
+              Binary node value for this revision number.
+
+        Negative values should index off the end of the sequence. ``-1``
+        should return the null revision. ``-2`` should return the most
+        recent revision.
+        """
+
+    def __contains__(rev):
+        """Whether a revision number exists."""
+
+    def insert(self, i, entry):
+        """Add an item to the index at specific revision."""
+
+class ifileindex(interfaceutil.Interface):
+    """Storage interface for index data of a single file.
+
+    File storage data is divided into index metadata and data storage.
+    This interface defines the index portion of the interface.
+
+    The index logically consists of:
+
+    * A mapping between revision numbers and nodes.
+    * DAG data (storing and querying the relationship between nodes).
+    * Metadata to facilitate storage.
+    """
+    def __len__():
+        """Obtain the number of revisions stored for this file."""
+
+    def __iter__():
+        """Iterate over revision numbers for this file."""
+
+    def hasnode(node):
+        """Returns a bool indicating if a node is known to this store.
+
+        Implementations must only return True for full, binary node values:
+        hex nodes, revision numbers, and partial node matches must be
+        rejected.
+
+        The null node is never present.
+        """
+
+    def revs(start=0, stop=None):
+        """Iterate over revision numbers for this file, with control."""
+
+    def parents(node):
+        """Returns a 2-tuple of parent nodes for a revision.
+
+        Values will be ``nullid`` if the parent is empty.
+        """
+
+    def parentrevs(rev):
+        """Like parents() but operates on revision numbers."""
+
+    def rev(node):
+        """Obtain the revision number given a node.
+
+        Raises ``error.LookupError`` if the node is not known.
+        """
+
+    def node(rev):
+        """Obtain the node value given a revision number.
+
+        Raises ``IndexError`` if the node is not known.
+        """
+
+    def lookup(node):
+        """Attempt to resolve a value to a node.
+
+        Value can be a binary node, hex node, revision number, or a string
+        that can be converted to an integer.
+
+        Raises ``error.LookupError`` if a node could not be resolved.
+        """
+
+    def linkrev(rev):
+        """Obtain the changeset revision number a revision is linked to."""
+
+    def iscensored(rev):
+        """Return whether a revision's content has been censored."""
+
+    def commonancestorsheads(node1, node2):
+        """Obtain an iterable of nodes containing heads of common ancestors.
+
+        See ``ancestor.commonancestorsheads()``.
+        """
+
+    def descendants(revs):
+        """Obtain descendant revision numbers for a set of revision numbers.
+
+        If ``nullrev`` is in the set, this is equivalent to ``revs()``.
+        """
+
+    def heads(start=None, stop=None):
+        """Obtain a list of nodes that are DAG heads, with control.
+
+        The set of revisions examined can be limited by specifying
+        ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
+        iterable of nodes. DAG traversal starts at earlier revision
+        ``start`` and iterates forward until any node in ``stop`` is
+        encountered.
+        """
+
+    def children(node):
+        """Obtain nodes that are children of a node.
+
+        Returns a list of nodes.
+        """
+
+class ifiledata(interfaceutil.Interface):
+    """Storage interface for data storage of a specific file.
+
+    This complements ``ifileindex`` and provides an interface for accessing
+    data for a tracked file.
+    """
+    def size(rev):
+        """Obtain the fulltext size of file data.
+
+        Any metadata is excluded from size measurements.
+        """
+
+    def revision(node, raw=False):
+        """Obtain fulltext data for a node.
+
+        By default, any storage transformations are applied before the data
+        is returned. If ``raw`` is True, non-raw storage transformations
+        are not applied.
+
+        The fulltext data may contain a header containing metadata. Most
+        consumers should use ``read()`` to obtain the actual file data.
+        """
+
+    def rawdata(node):
+        """Obtain raw data for a node.
+        """
+
+    def read(node):
+        """Resolve file fulltext data.
+
+        This is similar to ``revision()`` except any metadata in the data
+        headers is stripped.
+        """
+
+    def renamed(node):
+        """Obtain copy metadata for a node.
+
+        Returns ``False`` if no copy metadata is stored or a 2-tuple of
+        (path, node) from which this revision was copied.
+        """
+
+    def cmp(node, fulltext):
+        """Compare fulltext to another revision.
+
+        Returns True if the fulltext is different from what is stored.
+
+        This takes copy metadata into account.
+
+        TODO better document the copy metadata and censoring logic.
+        """
+
+    def emitrevisions(nodes,
+                      nodesorder=None,
+                      revisiondata=False,
+                      assumehaveparentrevisions=False,
+                      deltamode=CG_DELTAMODE_STD):
+        """Produce ``irevisiondelta`` for revisions.
+
+        Given an iterable of nodes, emits objects conforming to the
+        ``irevisiondelta`` interface that describe revisions in storage.
+
+        This method is a generator.
+
+        The input nodes may be unordered. Implementations must ensure that
+        a node's parents are emitted before the node itself. Transitively,
+        this means that a node may only be emitted once all its ancestors
+        in ``nodes`` have also been emitted.
+
+        By default, emits "index" data (the ``node``, ``p1node``, and
+        ``p2node`` attributes). If ``revisiondata`` is set, revision data
+        will also be present on the emitted objects.
+
+        With default argument values, implementations can choose to emit
+        either fulltext revision data or a delta. When emitting deltas,
+        implementations must consider whether the delta's base revision
+        fulltext is available to the receiver.
+
+        The base revision fulltext is guaranteed to be available if any of
+        the following are met:
+
+        * Its fulltext revision was emitted by this method call.
+        * A delta for that revision was emitted by this method call.
+        * ``assumehaveparentrevisions`` is True and the base revision is a
+          parent of the node.
+
+        ``nodesorder`` can be used to control the order that revisions are
+        emitted. By default, revisions can be reordered as long as they are
+        in DAG topological order (see above). If the value is ``nodes``,
+        the iteration order from ``nodes`` should be used. If the value is
+        ``storage``, then the native order from the backing storage layer
+        is used. (Not all storage layers will have strong ordering and
+        behavior of this mode is storage-dependent.) ``nodes`` ordering can
+        force revisions to be emitted before their ancestors, so consumers
+        should use it with care.
+
+        The ``linknode`` attribute on the returned ``irevisiondelta`` may
+        not be set and it is the caller's responsibility to resolve it, if
+        needed.
+
+        If ``deltamode`` is CG_DELTAMODE_PREV and revision data is
+        requested, all revision data should be emitted as deltas against
+        the revision emitted just prior. The initial revision should be a
+        delta against its 1st parent.
+        """
+
+class ifilemutation(interfaceutil.Interface):
+    """Storage interface for mutation events of a tracked file."""
+
+    def add(filedata, meta, transaction, linkrev, p1, p2):
+        """Add a new revision to the store.
+
+        Takes file data, dictionary of metadata, a transaction, linkrev,
+        and parent nodes.
+
+        Returns the node that was added.
+
+        May no-op if a revision matching the supplied data is already
+        stored.
+        """
+
+    def addrevision(revisiondata, transaction, linkrev, p1, p2, node=None,
+                    flags=0, cachedelta=None):
+        """Add a new revision to the store.
+
+        This is similar to ``add()`` except it operates at a lower level.
+
+        The data passed in already contains a metadata header, if any.
+
+        ``node`` and ``flags`` can be used to define the expected node and
+        the flags to use with storage. ``flags`` is a bitwise value
+        composed of the various ``REVISION_FLAG_*`` constants.
+
+        ``add()`` is usually called when adding files from e.g. the working
+        directory. ``addrevision()`` is often called by ``add()`` and for
+        scenarios where revision data has already been computed, such as
+        when applying raw data from a peer repo.
+        """
+
+    def addgroup(deltas, linkmapper, transaction, addrevisioncb=None,
+                 maybemissingparents=False):
+        """Process a series of deltas for storage.
+
+        ``deltas`` is an iterable of 7-tuples of
+        (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
+        to add.
+
+        The ``delta`` field contains ``mpatch`` data to apply to a base
+        revision, identified by ``deltabase``. The base node can be
+        ``nullid``, in which case the header from the delta can be ignored
+        and the delta used as the fulltext.
+
+        ``addrevisioncb`` should be called for each node as it is committed.
+
+        ``maybemissingparents`` is a bool indicating whether the incoming
+        data may reference parents/ancestor revisions that aren't present.
+        This flag is set when receiving data into a "shallow" store that
+        doesn't hold all history.
+
+        Returns a list of nodes that were processed. A node will be in the
+        list even if it existed in the store previously.
+        """
+
+    def censorrevision(tr, node, tombstone=b''):
+        """Remove the content of a single revision.
+
+        The specified ``node`` will have its content purged from storage.
+        Future attempts to access the revision data for this node will
+        result in failure.
+
+        A ``tombstone`` message can optionally be stored. This message may
+        be displayed to users when they attempt to access the missing
+        revision data.
+
+        Storage backends may have stored deltas against the previous
+        content in this revision. As part of censoring a revision, these
+        storage backends are expected to rewrite any internally stored
+        deltas such that they no longer reference the deleted content.
+        """
+
+    def getstrippoint(minlink):
+        """Find the minimum revision that must be stripped to strip a
+        linkrev.
+
+        Returns a 2-tuple containing the minimum revision number and a set
+        of all revision numbers that would be broken by this strip.
+
+        TODO this is highly revlog centric and should be abstracted into
+        a higher-level deletion API. ``repair.strip()`` relies on this.
+        """
+
+    def strip(minlink, transaction):
+        """Remove storage of items starting at a linkrev.
+
+        This uses ``getstrippoint()`` to determine the first node to
+        remove. Then it effectively truncates storage for all revisions
+        after that.
+
+        TODO this is highly revlog centric and should be abstracted into a
+        higher-level deletion API.
+        """
+
+class ifilestorage(ifileindex, ifiledata, ifilemutation):
+    """Complete storage interface for a single tracked file."""
+
+    def files():
+        """Obtain paths that are backing storage for this file.
+
+        TODO this is used heavily by verify code and there should probably
+        be a better API for that.
+        """
+
+    def storageinfo(exclusivefiles=False, sharedfiles=False,
+                    revisionscount=False, trackedsize=False,
+                    storedsize=False):
+        """Obtain information about storage for this file's data.
+
+        Returns a dict describing storage for this tracked path. The keys
+        in the dict map to arguments of the same. The arguments are bools
+        indicating whether to calculate and obtain that data.
+
+        exclusivefiles
+           Iterable of (vfs, path) describing files that are exclusively
+           used to back storage for this tracked path.
+
+        sharedfiles
+           Iterable of (vfs, path) describing files that are used to back
+           storage for this tracked path. Those files may also provide
+           storage for other stored entities.
+
+        revisionscount
+           Number of revisions available for retrieval.
+
+        trackedsize
+           Total size in bytes of all tracked revisions. This is a sum of
+           the length of the fulltext of all revisions.
+
+        storedsize
+           Total size in bytes used to store data for all tracked
+           revisions. This is commonly less than ``trackedsize`` due to
+           internal usage of deltas rather than fulltext revisions.
+
+        Not all storage backends may support all queries or have a
+        reasonable value to use. In that case, the value should be set to
+        ``None`` and callers are expected to handle this special value.
+        """
+
+    def verifyintegrity(state):
+        """Verifies the integrity of file storage.
+
+        ``state`` is a dict holding state of the verifier process. It can
+        be used to communicate data between invocations of multiple
+        storage primitives.
+
+        If individual revisions cannot have their revision content
+        resolved, the method is expected to set the ``skipread`` key to a
+        set of nodes that encountered problems.
+
+        The method yields objects conforming to the ``iverifyproblem``
+        interface.
+        """
+
+class idirs(interfaceutil.Interface):
+    """Interface representing a collection of directories from paths.
+
+    This interface is essentially a derived data structure representing
+    directories from a collection of paths.
+    """
+
+    def addpath(path):
+        """Add a path to the collection.
+
+        All directories in the path will be added to the collection.
+        """
+
+    def delpath(path):
+        """Remove a path from the collection.
+
+        If the removal was the last path in a particular directory, the
+        directory is removed from the collection.
+        """
+
+    def __iter__():
+        """Iterate over the directories in this collection of paths."""
+
+    def __contains__(path):
+        """Whether a specific directory is in this collection."""
+
+class imanifestdict(interfaceutil.Interface):
+    """Interface representing a manifest data structure.
+
+    A manifest is effectively a dict mapping paths to entries. Each entry
+    consists of a binary node and extra flags affecting that entry.
+ """ + + def __getitem__(path): + """Returns the binary node value for a path in the manifest. + + Raises ``KeyError`` if the path does not exist in the manifest. + + Equivalent to ``self.find(path)[0]``. + """ + + def find(path): + """Returns the entry for a path in the manifest. + + Returns a 2-tuple of (node, flags). + + Raises ``KeyError`` if the path does not exist in the manifest. + """ + + def __len__(): + """Return the number of entries in the manifest.""" + + def __nonzero__(): + """Returns True if the manifest has entries, False otherwise.""" + + __bool__ = __nonzero__ + + def __setitem__(path, node): + """Define the node value for a path in the manifest. + + If the path is already in the manifest, its flags will be copied to + the new entry. + """ + + def __contains__(path): + """Whether a path exists in the manifest.""" + + def __delitem__(path): + """Remove a path from the manifest. + + Raises ``KeyError`` if the path is not in the manifest. + """ + + def __iter__(): + """Iterate over paths in the manifest.""" + + def iterkeys(): + """Iterate over paths in the manifest.""" + + def keys(): + """Obtain a list of paths in the manifest.""" + + def filesnotin(other, match=None): + """Obtain the set of paths in this manifest but not in another. + + ``match`` is an optional matcher function to be applied to both + manifests. + + Returns a set of paths. + """ + + def dirs(): + """Returns an object implementing the ``idirs`` interface.""" + + def hasdir(dir): + """Returns a bool indicating if a directory is in this manifest.""" + + def matches(match): + """Generate a new manifest filtered through a matcher. + + Returns an object conforming to the ``imanifestdict`` interface. + """ + + def walk(match): + """Generator of paths in manifest satisfying a matcher. + + This is equivalent to ``self.matches(match).iterkeys()`` except a new + manifest object is not created. + + If the matcher has explicit files listed and they don't exist in + the manifest, ``match.bad()`` is called for each missing file. + """ + + def diff(other, match=None, clean=False): + """Find differences between this manifest and another. + + This manifest is compared to ``other``. + + If ``match`` is provided, the two manifests are filtered against this + matcher and only entries satisfying the matcher are compared. + + If ``clean`` is True, unchanged files are included in the returned + object. + + Returns a dict with paths as keys and values of 2-tuples of 2-tuples of + the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)`` + represents the node and flags for this manifest and ``(node2, flag2)`` + are the same for the other manifest. + """ + + def setflag(path, flag): + """Set the flag value for a given path. + + Raises ``KeyError`` if the path is not already in the manifest. + """ + + def get(path, default=None): + """Obtain the node value for a path or a default value if missing.""" + + def flags(path, default=''): + """Return the flags value for a path or a default value if missing.""" + + def copy(): + """Return a copy of this manifest.""" + + def items(): + """Returns an iterable of (path, node) for items in this manifest.""" + + def iteritems(): + """Identical to items().""" + + def iterentries(): + """Returns an iterable of (path, node, flags) for this manifest. + + Similar to ``iteritems()`` except items are a 3-tuple and include + flags. + """ + + def text(): + """Obtain the raw data representation for this manifest. + + Result is used to create a manifest revision. 
+ """ + + def fastdelta(base, changes): + """Obtain a delta between this manifest and another given changes. + + ``base`` in the raw data representation for another manifest. + + ``changes`` is an iterable of ``(path, to_delete)``. + + Returns a 2-tuple containing ``bytearray(self.text())`` and the + delta between ``base`` and this manifest. + """ + +class imanifestrevisionbase(interfaceutil.Interface): + """Base interface representing a single revision of a manifest. + + Should not be used as a primary interface: should always be inherited + as part of a larger interface. + """ + + def new(): + """Obtain a new manifest instance. + + Returns an object conforming to the ``imanifestrevisionwritable`` + interface. The instance will be associated with the same + ``imanifestlog`` collection as this instance. + """ + + def copy(): + """Obtain a copy of this manifest instance. + + Returns an object conforming to the ``imanifestrevisionwritable`` + interface. The instance will be associated with the same + ``imanifestlog`` collection as this instance. + """ + + def read(): + """Obtain the parsed manifest data structure. + + The returned object conforms to the ``imanifestdict`` interface. + """ + +class imanifestrevisionstored(imanifestrevisionbase): + """Interface representing a manifest revision committed to storage.""" + + def node(): + """The binary node for this manifest.""" + + parents = interfaceutil.Attribute( + """List of binary nodes that are parents for this manifest revision.""" + ) + + def readdelta(shallow=False): + """Obtain the manifest data structure representing changes from parent. + + This manifest is compared to its 1st parent. A new manifest representing + those differences is constructed. + + The returned object conforms to the ``imanifestdict`` interface. + """ + + def readfast(shallow=False): + """Calls either ``read()`` or ``readdelta()``. + + The faster of the two options is called. + """ + + def find(key): + """Calls self.read().find(key)``. + + Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``. + """ + +class imanifestrevisionwritable(imanifestrevisionbase): + """Interface representing a manifest revision that can be committed.""" + + def write(transaction, linkrev, p1node, p2node, added, removed, match=None): + """Add this revision to storage. + + Takes a transaction object, the changeset revision number it will + be associated with, its parent nodes, and lists of added and + removed paths. + + If match is provided, storage can choose not to inspect or write out + items that do not match. Storage is still required to be able to provide + the full manifest in the future for any directories written (these + manifests should not be "narrowed on disk"). + + Returns the binary node of the created revision. + """ + +class imanifeststorage(interfaceutil.Interface): + """Storage interface for manifest data.""" + + tree = interfaceutil.Attribute( + """The path to the directory this manifest tracks. + + The empty bytestring represents the root manifest. + """) + + index = interfaceutil.Attribute( + """An ``ifilerevisionssequence`` instance.""") + + indexfile = interfaceutil.Attribute( + """Path of revlog index file. + + TODO this is revlog specific and should not be exposed. + """) + + opener = interfaceutil.Attribute( + """VFS opener to use to access underlying files used for storage. + + TODO this is revlog specific and should not be exposed. + """) + + version = interfaceutil.Attribute( + """Revlog version number. 
+
+        TODO this is revlog specific and should not be exposed.
+        """)
+
+    _generaldelta = interfaceutil.Attribute(
+        """Whether generaldelta storage is being used.
+
+        TODO this is revlog specific and should not be exposed.
+        """)
+
+    fulltextcache = interfaceutil.Attribute(
+        """Dict with cache of fulltexts.
+
+        TODO this doesn't feel appropriate for the storage interface.
+        """)
+
+    def __len__():
+        """Obtain the number of revisions stored for this manifest."""
+
+    def __iter__():
+        """Iterate over revision numbers for this manifest."""
+
+    def rev(node):
+        """Obtain the revision number given a binary node.
+
+        Raises ``error.LookupError`` if the node is not known.
+        """
+
+    def node(rev):
+        """Obtain the node value given a revision number.
+
+        Raises ``error.LookupError`` if the revision is not known.
+        """
+
+    def lookup(value):
+        """Attempt to resolve a value to a node.
+
+        Value can be a binary node, hex node, revision number, or a bytes
+        that can be converted to an integer.
+
+        Raises ``error.LookupError`` if a node could not be resolved.
+        """
+
+    def parents(node):
+        """Returns a 2-tuple of parent nodes for a node.
+
+        Values will be ``nullid`` if the parent is empty.
+        """
+
+    def parentrevs(rev):
+        """Like parents() but operates on revision numbers."""
+
+    def linkrev(rev):
+        """Obtain the changeset revision number a revision is linked to."""
+
+    def revision(node, _df=None, raw=False):
+        """Obtain fulltext data for a node."""
+
+    def rawdata(node, _df=None):
+        """Obtain raw data for a node."""
+
+    def revdiff(rev1, rev2):
+        """Obtain a delta between two revision numbers.
+
+        The returned data is the result of ``bdiff.bdiff()`` on the raw
+        revision data.
+        """
+
+    def cmp(node, fulltext):
+        """Compare fulltext to another revision.
+
+        Returns True if the fulltext is different from what is stored.
+        """
+
+    def emitrevisions(nodes,
+                      nodesorder=None,
+                      revisiondata=False,
+                      assumehaveparentrevisions=False):
+        """Produce ``irevisiondelta`` describing revisions.
+
+        See the documentation for ``ifiledata`` for more.
+        """
+
+    def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
+        """Process a series of deltas for storage.
+
+        See the documentation in ``ifilemutation`` for more.
+        """
+
+    def rawsize(rev):
+        """Obtain the size of tracked data.
+
+        Is equivalent to ``len(m.rawdata(node))``.
+
+        TODO this method is only used by upgrade code and may be removed.
+        """
+
+    def getstrippoint(minlink):
+        """Find minimum revision that must be stripped to strip a linkrev.
+
+        See the documentation in ``ifilemutation`` for more.
+        """
+
+    def strip(minlink, transaction):
+        """Remove storage of items starting at a linkrev.
+
+        See the documentation in ``ifilemutation`` for more.
+        """
+
+    def checksize():
+        """Obtain the expected sizes of backing files.
+
+        TODO this is used by verify and it should not be part of the
+        interface.
+        """
+
+    def files():
+        """Obtain paths that are backing storage for this manifest.
+
+        TODO this is used by verify and there should probably be a better
+        API for this functionality.
+        """
+
+    def deltaparent(rev):
+        """Obtain the revision that a revision is delta'd against.
+
+        TODO delta encoding is an implementation detail of storage and
+        should not be exposed to the storage interface.
+ """ + + def clone(tr, dest, **kwargs): + """Clone this instance to another.""" + + def clearcaches(clear_persisted_data=False): + """Clear any caches associated with this instance.""" + + def dirlog(d): + """Obtain a manifest storage instance for a tree.""" + + def add(m, transaction, link, p1, p2, added, removed, readtree=None, + match=None): + """Add a revision to storage. + + ``m`` is an object conforming to ``imanifestdict``. + + ``link`` is the linkrev revision number. + + ``p1`` and ``p2`` are the parent revision numbers. + + ``added`` and ``removed`` are iterables of added and removed paths, + respectively. + + ``readtree`` is a function that can be used to read the child tree(s) + when recursively writing the full tree structure when using + treemanifets. + + ``match`` is a matcher that can be used to hint to storage that not all + paths must be inspected; this is an optimization and can be safely + ignored. Note that the storage must still be able to reproduce a full + manifest including files that did not match. + """ + + def storageinfo(exclusivefiles=False, sharedfiles=False, + revisionscount=False, trackedsize=False, + storedsize=False): + """Obtain information about storage for this manifest's data. + + See ``ifilestorage.storageinfo()`` for a description of this method. + This one behaves the same way, except for manifest data. + """ + +class imanifestlog(interfaceutil.Interface): + """Interface representing a collection of manifest snapshots. + + Represents the root manifest in a repository. + + Also serves as a means to access nested tree manifests and to cache + tree manifests. + """ + + def __getitem__(node): + """Obtain a manifest instance for a given binary node. + + Equivalent to calling ``self.get('', node)``. + + The returned object conforms to the ``imanifestrevisionstored`` + interface. + """ + + def get(tree, node, verify=True): + """Retrieve the manifest instance for a given directory and binary node. + + ``node`` always refers to the node of the root manifest (which will be + the only manifest if flat manifests are being used). + + If ``tree`` is the empty string, the root manifest is returned. + Otherwise the manifest for the specified directory will be returned + (requires tree manifests). + + If ``verify`` is True, ``LookupError`` is raised if the node is not + known. + + The returned object conforms to the ``imanifestrevisionstored`` + interface. + """ + + def getstorage(tree): + """Retrieve an interface to storage for a particular tree. + + If ``tree`` is the empty bytestring, storage for the root manifest will + be returned. Otherwise storage for a tree manifest is returned. + + TODO formalize interface for returned object. + """ + + def clearcaches(): + """Clear caches associated with this collection.""" + + def rev(node): + """Obtain the revision number for a binary node. + + Raises ``error.LookupError`` if the node is not known. + """ + +class ilocalrepositoryfilestorage(interfaceutil.Interface): + """Local repository sub-interface providing access to tracked file storage. + + This interface defines how a repository accesses storage for a single + tracked file path. + """ + + def file(f): + """Obtain a filelog for a tracked path. + + The returned type conforms to the ``ifilestorage`` interface. + """ + +class ilocalrepositorymain(interfaceutil.Interface): + """Main interface for local repositories. + + This currently captures the reality of things - not how things should be. 
+ """ + + supportedformats = interfaceutil.Attribute( + """Set of requirements that apply to stream clone. + + This is actually a class attribute and is shared among all instances. + """) + + supported = interfaceutil.Attribute( + """Set of requirements that this repo is capable of opening.""") + + requirements = interfaceutil.Attribute( + """Set of requirements this repo uses.""") + + features = interfaceutil.Attribute( + """Set of "features" this repository supports. + + A "feature" is a loosely-defined term. It can refer to a feature + in the classical sense or can describe an implementation detail + of the repository. For example, a ``readonly`` feature may denote + the repository as read-only. Or a ``revlogfilestore`` feature may + denote that the repository is using revlogs for file storage. + + The intent of features is to provide a machine-queryable mechanism + for repo consumers to test for various repository characteristics. + + Features are similar to ``requirements``. The main difference is that + requirements are stored on-disk and represent requirements to open the + repository. Features are more run-time capabilities of the repository + and more granular capabilities (which may be derived from requirements). + """) + + filtername = interfaceutil.Attribute( + """Name of the repoview that is active on this repo.""") + + wvfs = interfaceutil.Attribute( + """VFS used to access the working directory.""") + + vfs = interfaceutil.Attribute( + """VFS rooted at the .hg directory. + + Used to access repository data not in the store. + """) + + svfs = interfaceutil.Attribute( + """VFS rooted at the store. + + Used to access repository data in the store. Typically .hg/store. + But can point elsewhere if the store is shared. + """) + + root = interfaceutil.Attribute( + """Path to the root of the working directory.""") + + path = interfaceutil.Attribute( + """Path to the .hg directory.""") + + origroot = interfaceutil.Attribute( + """The filesystem path that was used to construct the repo.""") + + auditor = interfaceutil.Attribute( + """A pathauditor for the working directory. + + This checks if a path refers to a nested repository. + + Operates on the filesystem. + """) + + nofsauditor = interfaceutil.Attribute( + """A pathauditor for the working directory. + + This is like ``auditor`` except it doesn't do filesystem checks. + """) + + baseui = interfaceutil.Attribute( + """Original ui instance passed into constructor.""") + + ui = interfaceutil.Attribute( + """Main ui instance for this instance.""") + + sharedpath = interfaceutil.Attribute( + """Path to the .hg directory of the repo this repo was shared from.""") + + store = interfaceutil.Attribute( + """A store instance.""") + + spath = interfaceutil.Attribute( + """Path to the store.""") + + sjoin = interfaceutil.Attribute( + """Alias to self.store.join.""") + + cachevfs = interfaceutil.Attribute( + """A VFS used to access the cache directory. + + Typically .hg/cache. + """) + + wcachevfs = interfaceutil.Attribute( + """A VFS used to access the cache directory dedicated to working copy + + Typically .hg/wcache. 
+ """) + + filteredrevcache = interfaceutil.Attribute( + """Holds sets of revisions to be filtered.""") + + names = interfaceutil.Attribute( + """A ``namespaces`` instance.""") + + def close(): + """Close the handle on this repository.""" + + def peer(): + """Obtain an object conforming to the ``peer`` interface.""" + + def unfiltered(): + """Obtain an unfiltered/raw view of this repo.""" + + def filtered(name, visibilityexceptions=None): + """Obtain a named view of this repository.""" + + obsstore = interfaceutil.Attribute( + """A store of obsolescence data.""") + + changelog = interfaceutil.Attribute( + """A handle on the changelog revlog.""") + + manifestlog = interfaceutil.Attribute( + """An instance conforming to the ``imanifestlog`` interface. + + Provides access to manifests for the repository. + """) + + dirstate = interfaceutil.Attribute( + """Working directory state.""") + + narrowpats = interfaceutil.Attribute( + """Matcher patterns for this repository's narrowspec.""") + + def narrowmatch(match=None, includeexact=False): + """Obtain a matcher for the narrowspec.""" + + def setnarrowpats(newincludes, newexcludes): + """Define the narrowspec for this repository.""" + + def __getitem__(changeid): + """Try to resolve a changectx.""" + + def __contains__(changeid): + """Whether a changeset exists.""" + + def __nonzero__(): + """Always returns True.""" + return True + + __bool__ = __nonzero__ + + def __len__(): + """Returns the number of changesets in the repo.""" + + def __iter__(): + """Iterate over revisions in the changelog.""" + + def revs(expr, *args): + """Evaluate a revset. + + Emits revisions. + """ + + def set(expr, *args): + """Evaluate a revset. + + Emits changectx instances. + """ + + def anyrevs(specs, user=False, localalias=None): + """Find revisions matching one of the given revsets.""" + + def url(): + """Returns a string representing the location of this repo.""" + + def hook(name, throw=False, **args): + """Call a hook.""" + + def tags(): + """Return a mapping of tag to node.""" + + def tagtype(tagname): + """Return the type of a given tag.""" + + def tagslist(): + """Return a list of tags ordered by revision.""" + + def nodetags(node): + """Return the tags associated with a node.""" + + def nodebookmarks(node): + """Return the list of bookmarks pointing to the specified node.""" + + def branchmap(): + """Return a mapping of branch to heads in that branch.""" + + def revbranchcache(): + pass + + def branchtip(branchtip, ignoremissing=False): + """Return the tip node for a given branch.""" + + def lookup(key): + """Resolve the node for a revision.""" + + def lookupbranch(key): + """Look up the branch name of the given revision or branch name.""" + + def known(nodes): + """Determine whether a series of nodes is known. + + Returns a list of bools. 
+ """ + + def local(): + """Whether the repository is local.""" + return True + + def publishing(): + """Whether the repository is a publishing repository.""" + + def cancopy(): + pass + + def shared(): + """The type of shared repository or None.""" + + def wjoin(f, *insidef): + """Calls self.vfs.reljoin(self.root, f, *insidef)""" + + def setparents(p1, p2): + """Set the parent nodes of the working directory.""" + + def filectx(path, changeid=None, fileid=None): + """Obtain a filectx for the given file revision.""" + + def getcwd(): + """Obtain the current working directory from the dirstate.""" + + def pathto(f, cwd=None): + """Obtain the relative path to a file.""" + + def adddatafilter(name, fltr): + pass + + def wread(filename): + """Read a file from wvfs, using data filters.""" + + def wwrite(filename, data, flags, backgroundclose=False, **kwargs): + """Write data to a file in the wvfs, using data filters.""" + + def wwritedata(filename, data): + """Resolve data for writing to the wvfs, using data filters.""" + + def currenttransaction(): + """Obtain the current transaction instance or None.""" + + def transaction(desc, report=None): + """Open a new transaction to write to the repository.""" + + def undofiles(): + """Returns a list of (vfs, path) for files to undo transactions.""" + + def recover(): + """Roll back an interrupted transaction.""" + + def rollback(dryrun=False, force=False): + """Undo the last transaction. + + DANGEROUS. + """ + + def updatecaches(tr=None, full=False): + """Warm repo caches.""" + + def invalidatecaches(): + """Invalidate cached data due to the repository mutating.""" + + def invalidatevolatilesets(): + pass + + def invalidatedirstate(): + """Invalidate the dirstate.""" + + def invalidate(clearfilecache=False): + pass + + def invalidateall(): + pass + + def lock(wait=True): + """Lock the repository store and return a lock instance.""" + + def wlock(wait=True): + """Lock the non-store parts of the repository.""" + + def currentwlock(): + """Return the wlock if it's held or None.""" + + def checkcommitpatterns(wctx, vdirs, match, status, fail): + pass + + def commit(text='', user=None, date=None, match=None, force=False, + editor=False, extra=None): + """Add a new revision to the repository.""" + + def commitctx(ctx, error=False, origctx=None): + """Commit a commitctx instance to the repository.""" + + def destroying(): + """Inform the repository that nodes are about to be destroyed.""" + + def destroyed(): + """Inform the repository that nodes have been destroyed.""" + + def status(node1='.', node2=None, match=None, ignored=False, + clean=False, unknown=False, listsubrepos=False): + """Convenience method to call repo[x].status().""" + + def addpostdsstatus(ps): + pass + + def postdsstatus(): + pass + + def clearpostdsstatus(): + pass + + def heads(start=None): + """Obtain list of nodes that are DAG heads.""" + + def branchheads(branch=None, start=None, closed=False): + pass + + def branches(nodes): + pass + + def between(pairs): + pass + + def checkpush(pushop): + pass + + prepushoutgoinghooks = interfaceutil.Attribute( + """util.hooks instance.""") + + def pushkey(namespace, key, old, new): + pass + + def listkeys(namespace): + pass + + def debugwireargs(one, two, three=None, four=None, five=None): + pass + + def savecommitmessage(text): + pass + +class completelocalrepository(ilocalrepositorymain, + ilocalrepositoryfilestorage): + """Complete interface for a local repository.""" + +class iwireprotocolcommandcacher(interfaceutil.Interface): + 
"""Represents a caching backend for wire protocol commands. + + Wire protocol version 2 supports transparent caching of many commands. + To leverage this caching, servers can activate objects that cache + command responses. Objects handle both cache writing and reading. + This interface defines how that response caching mechanism works. + + Wire protocol version 2 commands emit a series of objects that are + serialized and sent to the client. The caching layer exists between + the invocation of the command function and the sending of its output + objects to an output layer. + + Instances of this interface represent a binding to a cache that + can serve a response (in place of calling a command function) and/or + write responses to a cache for subsequent use. + + When a command request arrives, the following happens with regards + to this interface: + + 1. The server determines whether the command request is cacheable. + 2. If it is, an instance of this interface is spawned. + 3. The cacher is activated in a context manager (``__enter__`` is called). + 4. A cache *key* for that request is derived. This will call the + instance's ``adjustcachekeystate()`` method so the derivation + can be influenced. + 5. The cacher is informed of the derived cache key via a call to + ``setcachekey()``. + 6. The cacher's ``lookup()`` method is called to test for presence of + the derived key in the cache. + 7. If ``lookup()`` returns a hit, that cached result is used in place + of invoking the command function. ``__exit__`` is called and the instance + is discarded. + 8. The command function is invoked. + 9. ``onobject()`` is called for each object emitted by the command + function. + 10. After the final object is seen, ``onfinished()`` is called. + 11. ``__exit__`` is called to signal the end of use of the instance. + + Cache *key* derivation can be influenced by the instance. + + Cache keys are initially derived by a deterministic representation of + the command request. This includes the command name, arguments, protocol + version, etc. This initial key derivation is performed by CBOR-encoding a + data structure and feeding that output into a hasher. + + Instances of this interface can influence this initial key derivation + via ``adjustcachekeystate()``. + + The instance is informed of the derived cache key via a call to + ``setcachekey()``. The instance must store the key locally so it can + be consulted on subsequent operations that may require it. + + When constructed, the instance has access to a callable that can be used + for encoding response objects. This callable receives as its single + argument an object emitted by a command function. It returns an iterable + of bytes chunks representing the encoded object. Unless the cacher is + caching native Python objects in memory or has a way of reconstructing + the original Python objects, implementations typically call this function + to produce bytes from the output objects and then store those bytes in + the cache. When it comes time to re-emit those bytes, they are wrapped + in a ``wireprototypes.encodedresponse`` instance to tell the output + layer that they are pre-encoded. + + When receiving the objects emitted by the command function, instances + can choose what to do with those objects. The simplest thing to do is + re-emit the original objects. They will be forwarded to the output + layer and will be processed as if the cacher did not exist. 
+
+    Implementations could also choose to not emit objects - instead
+    locally buffering objects or their encoded representation. They could
+    then emit a single "coalesced" object when ``onfinished()`` is called.
+    In this way, the implementation would function as a filtering layer of
+    sorts.
+
+    When caching objects, typically the encoded form of the object will
+    be stored. Keep in mind that if the original object is forwarded to
+    the output layer, it will need to be encoded there as well. For large
+    output, this redundant encoding could add overhead. Implementations
+    could wrap the encoded object data in ``wireprototypes.encodedresponse``
+    instances to avoid this overhead.
+    """
+    def __enter__():
+        """Marks the instance as active.
+
+        Should return self.
+        """
+
+    def __exit__(exctype, excvalue, exctb):
+        """Called when cacher is no longer used.
+
+        This can be used by implementations to perform cleanup actions
+        (e.g. disconnecting network sockets, aborting a partially cached
+        response).
+        """
+
+    def adjustcachekeystate(state):
+        """Influences cache key derivation by adjusting state to derive
+        key.
+
+        A dict defining the state used to derive the cache key is passed.
+
+        Implementations can modify this dict to record additional state
+        that is wanted to influence key derivation.
+
+        Implementations are *highly* encouraged to not modify or delete
+        existing keys.
+        """
+
+    def setcachekey(key):
+        """Record the derived cache key for this request.
+
+        Instances may mutate the key for internal usage, as desired. e.g.
+        instances may wish to prepend the repo name, introduce path
+        components for filesystem or URL addressing, etc. Behavior is up
+        to the cache.
+
+        Returns a bool indicating if the request is cacheable by this
+        instance.
+        """
+
+    def lookup():
+        """Attempt to resolve an entry in the cache.
+
+        The instance is instructed to look for the cache key that it was
+        informed about via the call to ``setcachekey()``.
+
+        If there's no cache hit or the cacher doesn't wish to use the
+        cached entry, ``None`` should be returned.
+
+        Else, a dict defining the cached result should be returned. The
+        dict may have the following keys:
+
+        objs
+           An iterable of objects that should be sent to the client. That
+           iterable of objects is expected to be what the command function
+           would return if invoked or an equivalent representation thereof.
+        """
+
+    def onobject(obj):
+        """Called when a new object is emitted from the command function.
+
+        Receives as its argument the object that was emitted from the
+        command function.
+
+        This method returns an iterator of objects to forward to the
+        output layer. The easiest implementation is a generator that just
+        ``yield obj``.
+        """
+
+    def onfinished():
+        """Called after all objects have been emitted from the command
+        function.
+
+        Implementations should return an iterator of objects to forward to
+        the output layer.
+
+        This method can be a generator.
+        """
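Two hedged sketches against the interfaces declared above (hypothetical
code, not part of this changeset). First, the client-side pattern that the
``ipeercommandexecutor`` docstrings describe: commands may be buffered
until the executor exits, so ``result()`` is only called afterwards::

    with peer.commandexecutor() as e:
        f = e.callcommand(b'heads', {})
    heads = f.result()  # safe: the executor has exited and sent commands

Second, a minimal in-memory ``iwireprotocolcommandcacher`` implementation.
It stores decoded objects rather than encoded bytes and ignores eviction;
``cache`` is shared state passed in because one cacher instance is spawned
per request::

    class memorycacher(object):
        def __init__(self, cache):
            self.cache = cache      # shared dict, outlives the request
            self.key = None
            self.buffered = []

        def __enter__(self):
            return self

        def __exit__(self, exctype, excvalue, exctb):
            pass

        def adjustcachekeystate(self, state):
            pass                    # no extra key derivation state

        def setcachekey(self, key):
            self.key = key
            return True             # this request is cacheable

        def lookup(self):
            if self.key not in self.cache:
                return None
            return {'objs': iter(self.cache[self.key])}

        def onobject(self, obj):
            self.buffered.append(obj)
            yield obj               # forward to the output layer unchanged

        def onfinished(self):
            self.cache[self.key] = self.buffered
            return []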
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/mercurial/interfaces/util.py Mon Sep 09 17:26:17 2019 -0400 @@ -0,0 +1,40 @@ +# util.py - Utilities for declaring interfaces. +# +# Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +# zope.interface imposes a run-time cost due to module import overhead and +# bookkeeping for declaring interfaces. So, we use stubs for various +# zope.interface primitives unless instructed otherwise. + +from __future__ import absolute_import + +from .. import ( + encoding, +) + +if encoding.environ.get('HGREALINTERFACES'): + from ..thirdparty.zope import ( + interface as zi, + ) + + Attribute = zi.Attribute + Interface = zi.Interface + implementer = zi.implementer +else: + class Attribute(object): + def __init__(self, __name__, __doc__=''): + pass + + class Interface(object): + def __init__(self, name, bases=(), attrs=None, __doc__=None, + __module__=None): + pass + + def implementer(*ifaces): + def wrapper(cls): + return cls + + return wrapper
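A hypothetical usage sketch of the stubs above; ``ithing`` and ``thing`` are invented names. With ``HGREALINTERFACES`` set in the environment, the same declarations go through zope.interface and carry real bookkeeping; otherwise the no-op stubs keep module import cheap.

from mercurial.interfaces import util as interfaceutil

class ithing(interfaceutil.Interface):
    """Describes a named thing."""

    name = interfaceutil.Attribute("""Name of the thing.""")

    def render():
        """Return a printable representation of the thing."""

@interfaceutil.implementer(ithing)
class thing(object):
    name = b'widget'

    def render(self):
        return self.name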
--- a/mercurial/localrepo.py Sat Sep 07 14:35:21 2019 +0100 +++ b/mercurial/localrepo.py Mon Sep 09 17:26:17 2019 -0400 @@ -28,7 +28,6 @@ branchmap, bundle2, changegroup, - changelog, color, context, dirstate, @@ -41,7 +40,6 @@ filelog, hook, lock as lockmod, - manifest, match as matchmod, merge as mergemod, mergeutil, @@ -52,7 +50,6 @@ phases, pushkey, pycompat, - repository, repoview, revset, revsetlang, @@ -66,8 +63,13 @@ util, vfs as vfsmod, ) + +from .interfaces import ( + repository, + util as interfaceutil, +) + from .utils import ( - interfaceutil, procutil, stringutil, ) @@ -1300,14 +1302,11 @@ @storecache('00changelog.i') def changelog(self): - return changelog.changelog(self.svfs, - trypending=txnutil.mayhavepending(self.root)) + return self.store.changelog(txnutil.mayhavepending(self.root)) @storecache('00manifest.i') def manifestlog(self): - rootstore = manifest.manifestrevlog(self.svfs) - return manifest.manifestlog(self.svfs, self, rootstore, - self._storenarrowmatch) + return self.store.manifestlog(self, self._storenarrowmatch) @repofilecache('dirstate') def dirstate(self): @@ -1942,6 +1941,12 @@ **pycompat.strkwargs(tr.hookargs)) def releasefn(tr, success): repo = reporef() + if repo is None: + # If the repo has been GC'd (and this release function is being + # called from transaction.__del__), there's not much we can do, + # so just leave the unfinished transaction there and let the + # user run `hg recover`. + return if success: # this should be explicitly invoked here, because # in-memory changes aren't written out at closing @@ -2214,6 +2219,16 @@ self.tags() self.filtered('served').tags() + # The `full` arg is documented as updating even the lazily-loaded + # caches immediately, so we're forcing a write to cause these caches + # to be warmed up even if they haven't explicitly been requested + # yet (if they've never been used by hg, they won't ever have been + # written, even if they're a subset of another kind of cache that + # *has* been used). + for filt in repoview.filtertable.keys(): + filtered = self.filtered(filt) + filtered.branchmap().write(filtered) + def invalidatecaches(self): if r'_tagscache' in vars(self):
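The ``releasefn`` guard exists because the transaction holds only a weak reference to the repository; a minimal, self-contained illustration (``Repo`` is a stand-in class, not Mercurial's):

import weakref

class Repo(object):
    pass

repo = Repo()
reporef = weakref.ref(repo)
del repo  # simulate the repo being garbage collected

# By the time transaction.__del__ runs releasefn, the referent can be
# gone, so releasefn must tolerate reporef() returning None.
assert reporef() is None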
--- a/mercurial/manifest.py Sat Sep 07 14:35:21 2019 +0100 +++ b/mercurial/manifest.py Mon Sep 09 17:26:17 2019 -0400 @@ -24,12 +24,12 @@ mdiff, policy, pycompat, - repository, revlog, util, ) -from .utils import ( - interfaceutil, +from .interfaces import ( + repository, + util as interfaceutil, ) parsers = policy.importmod(r'parsers') @@ -1620,6 +1620,9 @@ def revision(self, node, _df=None, raw=False): return self._revlog.revision(node, _df=_df, raw=raw) + def rawdata(self, node, _df=None): + return self._revlog.rawdata(node, _df=_df) + def revdiff(self, rev1, rev2): return self._revlog.revdiff(rev1, rev2)
--- a/mercurial/match.py Sat Sep 07 14:35:21 2019 +0100 +++ b/mercurial/match.py Mon Sep 09 17:26:17 2019 -0400 @@ -25,7 +25,7 @@ stringutil, ) -rustmod = policy.importrust('filepatterns') +rustmod = policy.importrust(r'filepatterns') allpatternkinds = ('re', 'glob', 'path', 'relglob', 'relpath', 'relre', 'rootglob', @@ -1223,7 +1223,12 @@ # Anything after the pattern must be a non-directory. return escaped + '[^/]+$' if kind == 'relglob': - return '(?:|.*/)' + _globre(pat) + globsuffix + globre = _globre(pat) + if globre.startswith('[^/]*'): + # When pat has the form *XYZ (common), make the returned regex more + # legible by returning the regex for **XYZ instead of **/*XYZ. + return '.*' + globre[len('[^/]*'):] + globsuffix + return '(?:|.*/)' + globre + globsuffix if kind == 'relre': if pat.startswith('^'): return pat
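An illustration of the ``relglob`` simplification, using literal regexes assumed from the diff rather than calling into match.py: for a pattern of the form ``*XYZ`` (here ``*.c``), the old and new translations accept the same paths; the new one is just easier to read.

import re

old = re.compile(r'(?:|.*/)[^/]*\.c$')  # previous '(?:|.*/)' + _globre(pat)
new = re.compile(r'.*\.c$')             # simplified '.*' + remainder

for path in ['x.c', 'a/b/x.c', 'a/bx.c', 'x.h']:
    assert bool(old.match(path)) == bool(new.match(path))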
--- a/mercurial/merge.py Sat Sep 07 14:35:21 2019 +0100 +++ b/mercurial/merge.py Mon Sep 09 17:26:17 2019 -0400 @@ -2025,7 +2025,8 @@ raise error.Abort(_("outstanding uncommitted merge")) ms = mergestate.read(repo) if list(ms.unresolved()): - raise error.Abort(_("outstanding merge conflicts")) + raise error.Abort(_("outstanding merge conflicts"), + hint=_("use 'hg resolve' to resolve")) if branchmerge: if pas == [p2]: raise error.Abort(_("merging with a working directory ancestor"
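For reference, ``error.Abort`` already supports a ``hint`` keyword; a small sketch of what the change constructs (the rendered form in the comment is an assumption about dispatch output, not captured from a run):

from mercurial import error

e = error.Abort(b'outstanding merge conflicts',
                hint=b"use 'hg resolve' to resolve")
# The command dispatcher renders this roughly as:
#   abort: outstanding merge conflicts
#   (use 'hg resolve' to resolve)
assert e.hint == b"use 'hg resolve' to resolve"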
--- a/mercurial/narrowspec.py Sat Sep 07 14:35:21 2019 +0100 +++ b/mercurial/narrowspec.py Mon Sep 09 17:26:17 2019 -0400 @@ -8,11 +8,13 @@ from __future__ import absolute_import from .i18n import _ +from .interfaces import ( + repository, +) from . import ( error, match as matchmod, merge, - repository, scmutil, sparse, util,
--- a/mercurial/patch.py Sat Sep 07 14:35:21 2019 +0100 +++ b/mercurial/patch.py Mon Sep 09 17:26:17 2019 -0400 @@ -864,7 +864,7 @@ allhunks_re = re.compile('(?:index|deleted file) ') pretty_re = re.compile('(?:new file|deleted file) ') special_re = re.compile('(?:index|deleted|copy|rename|new mode) ') - newfile_re = re.compile('(?:new file)') + newfile_re = re.compile('(?:new file|copy to|rename to)') def __init__(self, header): self.header = header
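A self-contained check of the widened pattern; the sample header lines are made up but follow git-diff extended header syntax:

import re

newfile_re = re.compile('(?:new file|copy to|rename to)')

lines = [
    'new file mode 100644',    # matched before and after this change
    'copy to b/copy.py',       # now matched
    'rename to b/new.py',      # now matched
    'index 0000000..e69de29',  # still not matched
]
for line in lines:
    print(line, '->', bool(newfile_re.match(line)))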
--- a/mercurial/repair.py Sat Sep 07 14:35:21 2019 +0100 +++ b/mercurial/repair.py Mon Sep 09 17:26:17 2019 -0400 @@ -81,14 +81,12 @@ _, brokenset = revlog.getstrippoint(striprev) return [revlog.linkrev(r) for r in brokenset] -def _collectmanifest(repo, striprev): - return _collectrevlog(repo.manifestlog.getstorage(b''), striprev) - def _collectbrokencsets(repo, files, striprev): """return the changesets which will be broken by the truncation""" s = set() - s.update(_collectmanifest(repo, striprev)) + for revlog in manifestrevlogs(repo): + s.update(_collectrevlog(revlog, striprev)) for fname in files: s.update(_collectrevlog(repo.file(fname), striprev)) @@ -364,11 +362,11 @@ callback.addnodes(nodelist) def stripmanifest(repo, striprev, tr, files): - revlog = repo.manifestlog.getstorage(b'') - revlog.strip(striprev, tr) - striptrees(repo, tr, striprev, files) + for revlog in manifestrevlogs(repo): + revlog.strip(striprev, tr) -def striptrees(repo, tr, striprev, files): +def manifestrevlogs(repo): + yield repo.manifestlog.getstorage(b'') if 'treemanifest' in repo.requirements: # This logic is safe if treemanifest isn't enabled, but also # pointless, so we skip it if treemanifest isn't enabled. @@ -376,7 +374,7 @@ if (unencoded.startswith('meta/') and unencoded.endswith('00manifest.i')): dir = unencoded[5:-12] - repo.manifestlog.getstorage(dir).strip(striprev, tr) + yield repo.manifestlog.getstorage(dir) def rebuildfncache(ui, repo): """Rebuilds the fncache file from repo history.
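A sketch of how the new generator composes; this mirrors ``_collectbrokencsets`` above and assumes an open ``localrepo`` instance, so it is illustrative rather than a drop-in API:

from mercurial import repair

def brokenbymanifeststrip(repo, striprev):
    # Walk the root manifest revlog plus any per-directory tree
    # manifest revlogs yielded by the new helper.
    broken = set()
    for revlog in repair.manifestrevlogs(repo):
        _minrev, brokenset = revlog.getstrippoint(striprev)
        broken.update(revlog.linkrev(r) for r in brokenset)
    return broken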
--- a/mercurial/repository.py Sat Sep 07 14:35:21 2019 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,1870 +0,0 @@ -# repository.py - Interfaces and base classes for repositories and peers. -# -# Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com> -# -# This software may be used and distributed according to the terms of the -# GNU General Public License version 2 or any later version. - -from __future__ import absolute_import - -from .i18n import _ -from . import ( - error, -) -from .utils import ( - interfaceutil, -) - -# When narrowing is finalized and no longer subject to format changes, -# we should move this to just "narrow" or similar. -NARROW_REQUIREMENT = 'narrowhg-experimental' - -# Local repository feature string. - -# Revlogs are being used for file storage. -REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage' -# The storage part of the repository is shared from an external source. -REPO_FEATURE_SHARED_STORAGE = b'sharedstore' -# LFS supported for backing file storage. -REPO_FEATURE_LFS = b'lfs' -# Repository supports being stream cloned. -REPO_FEATURE_STREAM_CLONE = b'streamclone' -# Files storage may lack data for all ancestors. -REPO_FEATURE_SHALLOW_FILE_STORAGE = b'shallowfilestorage' - -REVISION_FLAG_CENSORED = 1 << 15 -REVISION_FLAG_ELLIPSIS = 1 << 14 -REVISION_FLAG_EXTSTORED = 1 << 13 - -REVISION_FLAGS_KNOWN = ( - REVISION_FLAG_CENSORED | REVISION_FLAG_ELLIPSIS | REVISION_FLAG_EXTSTORED) - -CG_DELTAMODE_STD = b'default' -CG_DELTAMODE_PREV = b'previous' -CG_DELTAMODE_FULL = b'fulltext' -CG_DELTAMODE_P1 = b'p1' - -class ipeerconnection(interfaceutil.Interface): - """Represents a "connection" to a repository. - - This is the base interface for representing a connection to a repository. - It holds basic properties and methods applicable to all peer types. - - This is not a complete interface definition and should not be used - outside of this module. - """ - ui = interfaceutil.Attribute("""ui.ui instance""") - - def url(): - """Returns a URL string representing this peer. - - Currently, implementations expose the raw URL used to construct the - instance. It may contain credentials as part of the URL. The - expectations of the value aren't well-defined and this could lead to - data leakage. - - TODO audit/clean consumers and more clearly define the contents of this - value. - """ - - def local(): - """Returns a local repository instance. - - If the peer represents a local repository, returns an object that - can be used to interface with it. Otherwise returns ``None``. - """ - - def peer(): - """Returns an object conforming to this interface. - - Most implementations will ``return self``. - """ - - def canpush(): - """Returns a boolean indicating if this peer can be pushed to.""" - - def close(): - """Close the connection to this peer. - - This is called when the peer will no longer be used. Resources - associated with the peer should be cleaned up. - """ - -class ipeercapabilities(interfaceutil.Interface): - """Peer sub-interface related to capabilities.""" - - def capable(name): - """Determine support for a named capability. - - Returns ``False`` if capability not supported. - - Returns ``True`` if boolean capability is supported. Returns a string - if capability support is non-boolean. - - Capability strings may or may not map to wire protocol capabilities. - """ - - def requirecap(name, purpose): - """Require a capability to be present. - - Raises a ``CapabilityError`` if the capability isn't present. 
- """ - -class ipeercommands(interfaceutil.Interface): - """Client-side interface for communicating over the wire protocol. - - This interface is used as a gateway to the Mercurial wire protocol. - methods commonly call wire protocol commands of the same name. - """ - - def branchmap(): - """Obtain heads in named branches. - - Returns a dict mapping branch name to an iterable of nodes that are - heads on that branch. - """ - - def capabilities(): - """Obtain capabilities of the peer. - - Returns a set of string capabilities. - """ - - def clonebundles(): - """Obtains the clone bundles manifest for the repo. - - Returns the manifest as unparsed bytes. - """ - - def debugwireargs(one, two, three=None, four=None, five=None): - """Used to facilitate debugging of arguments passed over the wire.""" - - def getbundle(source, **kwargs): - """Obtain remote repository data as a bundle. - - This command is how the bulk of repository data is transferred from - the peer to the local repository - - Returns a generator of bundle data. - """ - - def heads(): - """Determine all known head revisions in the peer. - - Returns an iterable of binary nodes. - """ - - def known(nodes): - """Determine whether multiple nodes are known. - - Accepts an iterable of nodes whose presence to check for. - - Returns an iterable of booleans indicating of the corresponding node - at that index is known to the peer. - """ - - def listkeys(namespace): - """Obtain all keys in a pushkey namespace. - - Returns an iterable of key names. - """ - - def lookup(key): - """Resolve a value to a known revision. - - Returns a binary node of the resolved revision on success. - """ - - def pushkey(namespace, key, old, new): - """Set a value using the ``pushkey`` protocol. - - Arguments correspond to the pushkey namespace and key to operate on and - the old and new values for that key. - - Returns a string with the peer result. The value inside varies by the - namespace. - """ - - def stream_out(): - """Obtain streaming clone data. - - Successful result should be a generator of data chunks. - """ - - def unbundle(bundle, heads, url): - """Transfer repository data to the peer. - - This is how the bulk of data during a push is transferred. - - Returns the integer number of heads added to the peer. - """ - -class ipeerlegacycommands(interfaceutil.Interface): - """Interface for implementing support for legacy wire protocol commands. - - Wire protocol commands transition to legacy status when they are no longer - used by modern clients. To facilitate identifying which commands are - legacy, the interfaces are split. - """ - - def between(pairs): - """Obtain nodes between pairs of nodes. - - ``pairs`` is an iterable of node pairs. - - Returns an iterable of iterables of nodes corresponding to each - requested pair. - """ - - def branches(nodes): - """Obtain ancestor changesets of specific nodes back to a branch point. - - For each requested node, the peer finds the first ancestor node that is - a DAG root or is a merge. - - Returns an iterable of iterables with the resolved values for each node. - """ - - def changegroup(nodes, source): - """Obtain a changegroup with data for descendants of specified nodes.""" - - def changegroupsubset(bases, heads, source): - pass - -class ipeercommandexecutor(interfaceutil.Interface): - """Represents a mechanism to execute remote commands. - - This is the primary interface for requesting that wire protocol commands - be executed. 
Instances of this interface are active in a context manager - and have a well-defined lifetime. When the context manager exits, all - outstanding requests are waited on. - """ - - def callcommand(name, args): - """Request that a named command be executed. - - Receives the command name and a dictionary of command arguments. - - Returns a ``concurrent.futures.Future`` that will resolve to the - result of that command request. That exact value is left up to - the implementation and possibly varies by command. - - Not all commands can coexist with other commands in an executor - instance: it depends on the underlying wire protocol transport being - used and the command itself. - - Implementations MAY call ``sendcommands()`` automatically if the - requested command can not coexist with other commands in this executor. - - Implementations MAY call ``sendcommands()`` automatically when the - future's ``result()`` is called. So, consumers using multiple - commands with an executor MUST ensure that ``result()`` is not called - until all command requests have been issued. - """ - - def sendcommands(): - """Trigger submission of queued command requests. - - Not all transports submit commands as soon as they are requested to - run. When called, this method forces queued command requests to be - issued. It will no-op if all commands have already been sent. - - When called, no more new commands may be issued with this executor. - """ - - def close(): - """Signal that this command request is finished. - - When called, no more new commands may be issued. All outstanding - commands that have previously been issued are waited on before - returning. This not only includes waiting for the futures to resolve, - but also waiting for all response data to arrive. In other words, - calling this waits for all on-wire state for issued command requests - to finish. - - When used as a context manager, this method is called when exiting the - context manager. - - This method may call ``sendcommands()`` if there are buffered commands. - """ - -class ipeerrequests(interfaceutil.Interface): - """Interface for executing commands on a peer.""" - - limitedarguments = interfaceutil.Attribute( - """True if the peer cannot receive large argument value for commands.""" - ) - - def commandexecutor(): - """A context manager that resolves to an ipeercommandexecutor. - - The object this resolves to can be used to issue command requests - to the peer. - - Callers should call its ``callcommand`` method to issue command - requests. - - A new executor should be obtained for each distinct set of commands - (possibly just a single command) that the consumer wants to execute - as part of a single operation or round trip. This is because some - peers are half-duplex and/or don't support persistent connections. - e.g. in the case of HTTP peers, commands sent to an executor represent - a single HTTP request. While some peers may support multiple command - sends over the wire per executor, consumers need to code to the least - capable peer. So it should be assumed that command executors buffer - called commands until they are told to send them and that each - command executor could result in a new connection or wire-level request - being issued. - """ - -class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests): - """Unified interface for peer repositories. - - All peer instances must conform to this interface. 
- """ - -class ipeerv2(ipeerconnection, ipeercapabilities, ipeerrequests): - """Unified peer interface for wire protocol version 2 peers.""" - - apidescriptor = interfaceutil.Attribute( - """Data structure holding description of server API.""") - -@interfaceutil.implementer(ipeerbase) -class peer(object): - """Base class for peer repositories.""" - - limitedarguments = False - - def capable(self, name): - caps = self.capabilities() - if name in caps: - return True - - name = '%s=' % name - for cap in caps: - if cap.startswith(name): - return cap[len(name):] - - return False - - def requirecap(self, name, purpose): - if self.capable(name): - return - - raise error.CapabilityError( - _('cannot %s; remote repository does not support the ' - '\'%s\' capability') % (purpose, name)) - -class iverifyproblem(interfaceutil.Interface): - """Represents a problem with the integrity of the repository. - - Instances of this interface are emitted to describe an integrity issue - with a repository (e.g. corrupt storage, missing data, etc). - - Instances are essentially messages associated with severity. - """ - warning = interfaceutil.Attribute( - """Message indicating a non-fatal problem.""") - - error = interfaceutil.Attribute( - """Message indicating a fatal problem.""") - - node = interfaceutil.Attribute( - """Revision encountering the problem. - - ``None`` means the problem doesn't apply to a single revision. - """) - -class irevisiondelta(interfaceutil.Interface): - """Represents a delta between one revision and another. - - Instances convey enough information to allow a revision to be exchanged - with another repository. - - Instances represent the fulltext revision data or a delta against - another revision. Therefore the ``revision`` and ``delta`` attributes - are mutually exclusive. - - Typically used for changegroup generation. - """ - - node = interfaceutil.Attribute( - """20 byte node of this revision.""") - - p1node = interfaceutil.Attribute( - """20 byte node of 1st parent of this revision.""") - - p2node = interfaceutil.Attribute( - """20 byte node of 2nd parent of this revision.""") - - linknode = interfaceutil.Attribute( - """20 byte node of the changelog revision this node is linked to.""") - - flags = interfaceutil.Attribute( - """2 bytes of integer flags that apply to this revision. - - This is a bitwise composition of the ``REVISION_FLAG_*`` constants. - """) - - basenode = interfaceutil.Attribute( - """20 byte node of the revision this data is a delta against. - - ``nullid`` indicates that the revision is a full revision and not - a delta. - """) - - baserevisionsize = interfaceutil.Attribute( - """Size of base revision this delta is against. - - May be ``None`` if ``basenode`` is ``nullid``. - """) - - revision = interfaceutil.Attribute( - """Raw fulltext of revision data for this node.""") - - delta = interfaceutil.Attribute( - """Delta between ``basenode`` and ``node``. - - Stored in the bdiff delta format. - """) - -class ifilerevisionssequence(interfaceutil.Interface): - """Contains index data for all revisions of a file. - - Types implementing this behave like lists of tuples. The index - in the list corresponds to the revision number. The values contain - index metadata. - - The *null* revision (revision number -1) is always the last item - in the index. - """ - - def __len__(): - """The total number of revisions.""" - - def __getitem__(rev): - """Returns the object having a specific revision number. 
- - Returns an 8-tuple with the following fields: - - offset+flags - Contains the offset and flags for the revision. 64-bit unsigned - integer where first 6 bytes are the offset and the next 2 bytes - are flags. The offset can be 0 if it is not used by the store. - compressed size - Size of the revision data in the store. It can be 0 if it isn't - needed by the store. - uncompressed size - Fulltext size. It can be 0 if it isn't needed by the store. - base revision - Revision number of revision the delta for storage is encoded - against. -1 indicates not encoded against a base revision. - link revision - Revision number of changelog revision this entry is related to. - p1 revision - Revision number of 1st parent. -1 if no 1st parent. - p2 revision - Revision number of 2nd parent. -1 if no 1st parent. - node - Binary node value for this revision number. - - Negative values should index off the end of the sequence. ``-1`` - should return the null revision. ``-2`` should return the most - recent revision. - """ - - def __contains__(rev): - """Whether a revision number exists.""" - - def insert(self, i, entry): - """Add an item to the index at specific revision.""" - -class ifileindex(interfaceutil.Interface): - """Storage interface for index data of a single file. - - File storage data is divided into index metadata and data storage. - This interface defines the index portion of the interface. - - The index logically consists of: - - * A mapping between revision numbers and nodes. - * DAG data (storing and querying the relationship between nodes). - * Metadata to facilitate storage. - """ - def __len__(): - """Obtain the number of revisions stored for this file.""" - - def __iter__(): - """Iterate over revision numbers for this file.""" - - def hasnode(node): - """Returns a bool indicating if a node is known to this store. - - Implementations must only return True for full, binary node values: - hex nodes, revision numbers, and partial node matches must be - rejected. - - The null node is never present. - """ - - def revs(start=0, stop=None): - """Iterate over revision numbers for this file, with control.""" - - def parents(node): - """Returns a 2-tuple of parent nodes for a revision. - - Values will be ``nullid`` if the parent is empty. - """ - - def parentrevs(rev): - """Like parents() but operates on revision numbers.""" - - def rev(node): - """Obtain the revision number given a node. - - Raises ``error.LookupError`` if the node is not known. - """ - - def node(rev): - """Obtain the node value given a revision number. - - Raises ``IndexError`` if the node is not known. - """ - - def lookup(node): - """Attempt to resolve a value to a node. - - Value can be a binary node, hex node, revision number, or a string - that can be converted to an integer. - - Raises ``error.LookupError`` if a node could not be resolved. - """ - - def linkrev(rev): - """Obtain the changeset revision number a revision is linked to.""" - - def iscensored(rev): - """Return whether a revision's content has been censored.""" - - def commonancestorsheads(node1, node2): - """Obtain an iterable of nodes containing heads of common ancestors. - - See ``ancestor.commonancestorsheads()``. - """ - - def descendants(revs): - """Obtain descendant revision numbers for a set of revision numbers. - - If ``nullrev`` is in the set, this is equivalent to ``revs()``. - """ - - def heads(start=None, stop=None): - """Obtain a list of nodes that are DAG heads, with control. 
- - The set of revisions examined can be limited by specifying - ``start`` and ``stop``. ``start`` is a node. ``stop`` is an - iterable of nodes. DAG traversal starts at earlier revision - ``start`` and iterates forward until any node in ``stop`` is - encountered. - """ - - def children(node): - """Obtain nodes that are children of a node. - - Returns a list of nodes. - """ - -class ifiledata(interfaceutil.Interface): - """Storage interface for data storage of a specific file. - - This complements ``ifileindex`` and provides an interface for accessing - data for a tracked file. - """ - def size(rev): - """Obtain the fulltext size of file data. - - Any metadata is excluded from size measurements. - """ - - def revision(node, raw=False): - """"Obtain fulltext data for a node. - - By default, any storage transformations are applied before the data - is returned. If ``raw`` is True, non-raw storage transformations - are not applied. - - The fulltext data may contain a header containing metadata. Most - consumers should use ``read()`` to obtain the actual file data. - """ - - def read(node): - """Resolve file fulltext data. - - This is similar to ``revision()`` except any metadata in the data - headers is stripped. - """ - - def renamed(node): - """Obtain copy metadata for a node. - - Returns ``False`` if no copy metadata is stored or a 2-tuple of - (path, node) from which this revision was copied. - """ - - def cmp(node, fulltext): - """Compare fulltext to another revision. - - Returns True if the fulltext is different from what is stored. - - This takes copy metadata into account. - - TODO better document the copy metadata and censoring logic. - """ - - def emitrevisions(nodes, - nodesorder=None, - revisiondata=False, - assumehaveparentrevisions=False, - deltamode=CG_DELTAMODE_STD): - """Produce ``irevisiondelta`` for revisions. - - Given an iterable of nodes, emits objects conforming to the - ``irevisiondelta`` interface that describe revisions in storage. - - This method is a generator. - - The input nodes may be unordered. Implementations must ensure that a - node's parents are emitted before the node itself. Transitively, this - means that a node may only be emitted once all its ancestors in - ``nodes`` have also been emitted. - - By default, emits "index" data (the ``node``, ``p1node``, and - ``p2node`` attributes). If ``revisiondata`` is set, revision data - will also be present on the emitted objects. - - With default argument values, implementations can choose to emit - either fulltext revision data or a delta. When emitting deltas, - implementations must consider whether the delta's base revision - fulltext is available to the receiver. - - The base revision fulltext is guaranteed to be available if any of - the following are met: - - * Its fulltext revision was emitted by this method call. - * A delta for that revision was emitted by this method call. - * ``assumehaveparentrevisions`` is True and the base revision is a - parent of the node. - - ``nodesorder`` can be used to control the order that revisions are - emitted. By default, revisions can be reordered as long as they are - in DAG topological order (see above). If the value is ``nodes``, - the iteration order from ``nodes`` should be used. If the value is - ``storage``, then the native order from the backing storage layer - is used. (Not all storage layers will have strong ordering and behavior - of this mode is storage-dependent.) 
``nodes`` ordering can force - revisions to be emitted before their ancestors, so consumers should - use it with care. - - The ``linknode`` attribute on the returned ``irevisiondelta`` may not - be set and it is the caller's responsibility to resolve it, if needed. - - If ``deltamode`` is CG_DELTAMODE_PREV and revision data is requested, - all revision data should be emitted as deltas against the revision - emitted just prior. The initial revision should be a delta against its - 1st parent. - """ - -class ifilemutation(interfaceutil.Interface): - """Storage interface for mutation events of a tracked file.""" - - def add(filedata, meta, transaction, linkrev, p1, p2): - """Add a new revision to the store. - - Takes file data, dictionary of metadata, a transaction, linkrev, - and parent nodes. - - Returns the node that was added. - - May no-op if a revision matching the supplied data is already stored. - """ - - def addrevision(revisiondata, transaction, linkrev, p1, p2, node=None, - flags=0, cachedelta=None): - """Add a new revision to the store. - - This is similar to ``add()`` except it operates at a lower level. - - The data passed in already contains a metadata header, if any. - - ``node`` and ``flags`` can be used to define the expected node and - the flags to use with storage. ``flags`` is a bitwise value composed - of the various ``REVISION_FLAG_*`` constants. - - ``add()`` is usually called when adding files from e.g. the working - directory. ``addrevision()`` is often called by ``add()`` and for - scenarios where revision data has already been computed, such as when - applying raw data from a peer repo. - """ - - def addgroup(deltas, linkmapper, transaction, addrevisioncb=None, - maybemissingparents=False): - """Process a series of deltas for storage. - - ``deltas`` is an iterable of 7-tuples of - (node, p1, p2, linknode, deltabase, delta, flags) defining revisions - to add. - - The ``delta`` field contains ``mpatch`` data to apply to a base - revision, identified by ``deltabase``. The base node can be - ``nullid``, in which case the header from the delta can be ignored - and the delta used as the fulltext. - - ``addrevisioncb`` should be called for each node as it is committed. - - ``maybemissingparents`` is a bool indicating whether the incoming - data may reference parents/ancestor revisions that aren't present. - This flag is set when receiving data into a "shallow" store that - doesn't hold all history. - - Returns a list of nodes that were processed. A node will be in the list - even if it existed in the store previously. - """ - - def censorrevision(tr, node, tombstone=b''): - """Remove the content of a single revision. - - The specified ``node`` will have its content purged from storage. - Future attempts to access the revision data for this node will - result in failure. - - A ``tombstone`` message can optionally be stored. This message may be - displayed to users when they attempt to access the missing revision - data. - - Storage backends may have stored deltas against the previous content - in this revision. As part of censoring a revision, these storage - backends are expected to rewrite any internally stored deltas such - that they no longer reference the deleted content. - """ - - def getstrippoint(minlink): - """Find the minimum revision that must be stripped to strip a linkrev. - - Returns a 2-tuple containing the minimum revision number and a set - of all revisions numbers that would be broken by this strip. 
- - TODO this is highly revlog centric and should be abstracted into - a higher-level deletion API. ``repair.strip()`` relies on this. - """ - - def strip(minlink, transaction): - """Remove storage of items starting at a linkrev. - - This uses ``getstrippoint()`` to determine the first node to remove. - Then it effectively truncates storage for all revisions after that. - - TODO this is highly revlog centric and should be abstracted into a - higher-level deletion API. - """ - -class ifilestorage(ifileindex, ifiledata, ifilemutation): - """Complete storage interface for a single tracked file.""" - - def files(): - """Obtain paths that are backing storage for this file. - - TODO this is used heavily by verify code and there should probably - be a better API for that. - """ - - def storageinfo(exclusivefiles=False, sharedfiles=False, - revisionscount=False, trackedsize=False, - storedsize=False): - """Obtain information about storage for this file's data. - - Returns a dict describing storage for this tracked path. The keys - in the dict map to arguments of the same. The arguments are bools - indicating whether to calculate and obtain that data. - - exclusivefiles - Iterable of (vfs, path) describing files that are exclusively - used to back storage for this tracked path. - - sharedfiles - Iterable of (vfs, path) describing files that are used to back - storage for this tracked path. Those files may also provide storage - for other stored entities. - - revisionscount - Number of revisions available for retrieval. - - trackedsize - Total size in bytes of all tracked revisions. This is a sum of the - length of the fulltext of all revisions. - - storedsize - Total size in bytes used to store data for all tracked revisions. - This is commonly less than ``trackedsize`` due to internal usage - of deltas rather than fulltext revisions. - - Not all storage backends may support all queries are have a reasonable - value to use. In that case, the value should be set to ``None`` and - callers are expected to handle this special value. - """ - - def verifyintegrity(state): - """Verifies the integrity of file storage. - - ``state`` is a dict holding state of the verifier process. It can be - used to communicate data between invocations of multiple storage - primitives. - - If individual revisions cannot have their revision content resolved, - the method is expected to set the ``skipread`` key to a set of nodes - that encountered problems. - - The method yields objects conforming to the ``iverifyproblem`` - interface. - """ - -class idirs(interfaceutil.Interface): - """Interface representing a collection of directories from paths. - - This interface is essentially a derived data structure representing - directories from a collection of paths. - """ - - def addpath(path): - """Add a path to the collection. - - All directories in the path will be added to the collection. - """ - - def delpath(path): - """Remove a path from the collection. - - If the removal was the last path in a particular directory, the - directory is removed from the collection. - """ - - def __iter__(): - """Iterate over the directories in this collection of paths.""" - - def __contains__(path): - """Whether a specific directory is in this collection.""" - -class imanifestdict(interfaceutil.Interface): - """Interface representing a manifest data structure. - - A manifest is effectively a dict mapping paths to entries. Each entry - consists of a binary node and extra flags affecting that entry. 
- """ - - def __getitem__(path): - """Returns the binary node value for a path in the manifest. - - Raises ``KeyError`` if the path does not exist in the manifest. - - Equivalent to ``self.find(path)[0]``. - """ - - def find(path): - """Returns the entry for a path in the manifest. - - Returns a 2-tuple of (node, flags). - - Raises ``KeyError`` if the path does not exist in the manifest. - """ - - def __len__(): - """Return the number of entries in the manifest.""" - - def __nonzero__(): - """Returns True if the manifest has entries, False otherwise.""" - - __bool__ = __nonzero__ - - def __setitem__(path, node): - """Define the node value for a path in the manifest. - - If the path is already in the manifest, its flags will be copied to - the new entry. - """ - - def __contains__(path): - """Whether a path exists in the manifest.""" - - def __delitem__(path): - """Remove a path from the manifest. - - Raises ``KeyError`` if the path is not in the manifest. - """ - - def __iter__(): - """Iterate over paths in the manifest.""" - - def iterkeys(): - """Iterate over paths in the manifest.""" - - def keys(): - """Obtain a list of paths in the manifest.""" - - def filesnotin(other, match=None): - """Obtain the set of paths in this manifest but not in another. - - ``match`` is an optional matcher function to be applied to both - manifests. - - Returns a set of paths. - """ - - def dirs(): - """Returns an object implementing the ``idirs`` interface.""" - - def hasdir(dir): - """Returns a bool indicating if a directory is in this manifest.""" - - def matches(match): - """Generate a new manifest filtered through a matcher. - - Returns an object conforming to the ``imanifestdict`` interface. - """ - - def walk(match): - """Generator of paths in manifest satisfying a matcher. - - This is equivalent to ``self.matches(match).iterkeys()`` except a new - manifest object is not created. - - If the matcher has explicit files listed and they don't exist in - the manifest, ``match.bad()`` is called for each missing file. - """ - - def diff(other, match=None, clean=False): - """Find differences between this manifest and another. - - This manifest is compared to ``other``. - - If ``match`` is provided, the two manifests are filtered against this - matcher and only entries satisfying the matcher are compared. - - If ``clean`` is True, unchanged files are included in the returned - object. - - Returns a dict with paths as keys and values of 2-tuples of 2-tuples of - the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)`` - represents the node and flags for this manifest and ``(node2, flag2)`` - are the same for the other manifest. - """ - - def setflag(path, flag): - """Set the flag value for a given path. - - Raises ``KeyError`` if the path is not already in the manifest. - """ - - def get(path, default=None): - """Obtain the node value for a path or a default value if missing.""" - - def flags(path, default=''): - """Return the flags value for a path or a default value if missing.""" - - def copy(): - """Return a copy of this manifest.""" - - def items(): - """Returns an iterable of (path, node) for items in this manifest.""" - - def iteritems(): - """Identical to items().""" - - def iterentries(): - """Returns an iterable of (path, node, flags) for this manifest. - - Similar to ``iteritems()`` except items are a 3-tuple and include - flags. - """ - - def text(): - """Obtain the raw data representation for this manifest. - - Result is used to create a manifest revision. 
- """ - - def fastdelta(base, changes): - """Obtain a delta between this manifest and another given changes. - - ``base`` in the raw data representation for another manifest. - - ``changes`` is an iterable of ``(path, to_delete)``. - - Returns a 2-tuple containing ``bytearray(self.text())`` and the - delta between ``base`` and this manifest. - """ - -class imanifestrevisionbase(interfaceutil.Interface): - """Base interface representing a single revision of a manifest. - - Should not be used as a primary interface: should always be inherited - as part of a larger interface. - """ - - def new(): - """Obtain a new manifest instance. - - Returns an object conforming to the ``imanifestrevisionwritable`` - interface. The instance will be associated with the same - ``imanifestlog`` collection as this instance. - """ - - def copy(): - """Obtain a copy of this manifest instance. - - Returns an object conforming to the ``imanifestrevisionwritable`` - interface. The instance will be associated with the same - ``imanifestlog`` collection as this instance. - """ - - def read(): - """Obtain the parsed manifest data structure. - - The returned object conforms to the ``imanifestdict`` interface. - """ - -class imanifestrevisionstored(imanifestrevisionbase): - """Interface representing a manifest revision committed to storage.""" - - def node(): - """The binary node for this manifest.""" - - parents = interfaceutil.Attribute( - """List of binary nodes that are parents for this manifest revision.""" - ) - - def readdelta(shallow=False): - """Obtain the manifest data structure representing changes from parent. - - This manifest is compared to its 1st parent. A new manifest representing - those differences is constructed. - - The returned object conforms to the ``imanifestdict`` interface. - """ - - def readfast(shallow=False): - """Calls either ``read()`` or ``readdelta()``. - - The faster of the two options is called. - """ - - def find(key): - """Calls self.read().find(key)``. - - Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``. - """ - -class imanifestrevisionwritable(imanifestrevisionbase): - """Interface representing a manifest revision that can be committed.""" - - def write(transaction, linkrev, p1node, p2node, added, removed, match=None): - """Add this revision to storage. - - Takes a transaction object, the changeset revision number it will - be associated with, its parent nodes, and lists of added and - removed paths. - - If match is provided, storage can choose not to inspect or write out - items that do not match. Storage is still required to be able to provide - the full manifest in the future for any directories written (these - manifests should not be "narrowed on disk"). - - Returns the binary node of the created revision. - """ - -class imanifeststorage(interfaceutil.Interface): - """Storage interface for manifest data.""" - - tree = interfaceutil.Attribute( - """The path to the directory this manifest tracks. - - The empty bytestring represents the root manifest. - """) - - index = interfaceutil.Attribute( - """An ``ifilerevisionssequence`` instance.""") - - indexfile = interfaceutil.Attribute( - """Path of revlog index file. - - TODO this is revlog specific and should not be exposed. - """) - - opener = interfaceutil.Attribute( - """VFS opener to use to access underlying files used for storage. - - TODO this is revlog specific and should not be exposed. - """) - - version = interfaceutil.Attribute( - """Revlog version number. 
- - TODO this is revlog specific and should not be exposed. - """) - - _generaldelta = interfaceutil.Attribute( - """Whether generaldelta storage is being used. - - TODO this is revlog specific and should not be exposed. - """) - - fulltextcache = interfaceutil.Attribute( - """Dict with cache of fulltexts. - - TODO this doesn't feel appropriate for the storage interface. - """) - - def __len__(): - """Obtain the number of revisions stored for this manifest.""" - - def __iter__(): - """Iterate over revision numbers for this manifest.""" - - def rev(node): - """Obtain the revision number given a binary node. - - Raises ``error.LookupError`` if the node is not known. - """ - - def node(rev): - """Obtain the node value given a revision number. - - Raises ``error.LookupError`` if the revision is not known. - """ - - def lookup(value): - """Attempt to resolve a value to a node. - - Value can be a binary node, hex node, revision number, or a bytes - that can be converted to an integer. - - Raises ``error.LookupError`` if a ndoe could not be resolved. - """ - - def parents(node): - """Returns a 2-tuple of parent nodes for a node. - - Values will be ``nullid`` if the parent is empty. - """ - - def parentrevs(rev): - """Like parents() but operates on revision numbers.""" - - def linkrev(rev): - """Obtain the changeset revision number a revision is linked to.""" - - def revision(node, _df=None, raw=False): - """Obtain fulltext data for a node.""" - - def revdiff(rev1, rev2): - """Obtain a delta between two revision numbers. - - The returned data is the result of ``bdiff.bdiff()`` on the raw - revision data. - """ - - def cmp(node, fulltext): - """Compare fulltext to another revision. - - Returns True if the fulltext is different from what is stored. - """ - - def emitrevisions(nodes, - nodesorder=None, - revisiondata=False, - assumehaveparentrevisions=False): - """Produce ``irevisiondelta`` describing revisions. - - See the documentation for ``ifiledata`` for more. - """ - - def addgroup(deltas, linkmapper, transaction, addrevisioncb=None): - """Process a series of deltas for storage. - - See the documentation in ``ifilemutation`` for more. - """ - - def rawsize(rev): - """Obtain the size of tracked data. - - Is equivalent to ``len(m.revision(node, raw=True))``. - - TODO this method is only used by upgrade code and may be removed. - """ - - def getstrippoint(minlink): - """Find minimum revision that must be stripped to strip a linkrev. - - See the documentation in ``ifilemutation`` for more. - """ - - def strip(minlink, transaction): - """Remove storage of items starting at a linkrev. - - See the documentation in ``ifilemutation`` for more. - """ - - def checksize(): - """Obtain the expected sizes of backing files. - - TODO this is used by verify and it should not be part of the interface. - """ - - def files(): - """Obtain paths that are backing storage for this manifest. - - TODO this is used by verify and there should probably be a better API - for this functionality. - """ - - def deltaparent(rev): - """Obtain the revision that a revision is delta'd against. - - TODO delta encoding is an implementation detail of storage and should - not be exposed to the storage interface. 
- """ - - def clone(tr, dest, **kwargs): - """Clone this instance to another.""" - - def clearcaches(clear_persisted_data=False): - """Clear any caches associated with this instance.""" - - def dirlog(d): - """Obtain a manifest storage instance for a tree.""" - - def add(m, transaction, link, p1, p2, added, removed, readtree=None, - match=None): - """Add a revision to storage. - - ``m`` is an object conforming to ``imanifestdict``. - - ``link`` is the linkrev revision number. - - ``p1`` and ``p2`` are the parent revision numbers. - - ``added`` and ``removed`` are iterables of added and removed paths, - respectively. - - ``readtree`` is a function that can be used to read the child tree(s) - when recursively writing the full tree structure when using - treemanifets. - - ``match`` is a matcher that can be used to hint to storage that not all - paths must be inspected; this is an optimization and can be safely - ignored. Note that the storage must still be able to reproduce a full - manifest including files that did not match. - """ - - def storageinfo(exclusivefiles=False, sharedfiles=False, - revisionscount=False, trackedsize=False, - storedsize=False): - """Obtain information about storage for this manifest's data. - - See ``ifilestorage.storageinfo()`` for a description of this method. - This one behaves the same way, except for manifest data. - """ - -class imanifestlog(interfaceutil.Interface): - """Interface representing a collection of manifest snapshots. - - Represents the root manifest in a repository. - - Also serves as a means to access nested tree manifests and to cache - tree manifests. - """ - - def __getitem__(node): - """Obtain a manifest instance for a given binary node. - - Equivalent to calling ``self.get('', node)``. - - The returned object conforms to the ``imanifestrevisionstored`` - interface. - """ - - def get(tree, node, verify=True): - """Retrieve the manifest instance for a given directory and binary node. - - ``node`` always refers to the node of the root manifest (which will be - the only manifest if flat manifests are being used). - - If ``tree`` is the empty string, the root manifest is returned. - Otherwise the manifest for the specified directory will be returned - (requires tree manifests). - - If ``verify`` is True, ``LookupError`` is raised if the node is not - known. - - The returned object conforms to the ``imanifestrevisionstored`` - interface. - """ - - def getstorage(tree): - """Retrieve an interface to storage for a particular tree. - - If ``tree`` is the empty bytestring, storage for the root manifest will - be returned. Otherwise storage for a tree manifest is returned. - - TODO formalize interface for returned object. - """ - - def clearcaches(): - """Clear caches associated with this collection.""" - - def rev(node): - """Obtain the revision number for a binary node. - - Raises ``error.LookupError`` if the node is not known. - """ - -class ilocalrepositoryfilestorage(interfaceutil.Interface): - """Local repository sub-interface providing access to tracked file storage. - - This interface defines how a repository accesses storage for a single - tracked file path. - """ - - def file(f): - """Obtain a filelog for a tracked path. - - The returned type conforms to the ``ifilestorage`` interface. - """ - -class ilocalrepositorymain(interfaceutil.Interface): - """Main interface for local repositories. - - This currently captures the reality of things - not how things should be. 
- """ - - supportedformats = interfaceutil.Attribute( - """Set of requirements that apply to stream clone. - - This is actually a class attribute and is shared among all instances. - """) - - supported = interfaceutil.Attribute( - """Set of requirements that this repo is capable of opening.""") - - requirements = interfaceutil.Attribute( - """Set of requirements this repo uses.""") - - features = interfaceutil.Attribute( - """Set of "features" this repository supports. - - A "feature" is a loosely-defined term. It can refer to a feature - in the classical sense or can describe an implementation detail - of the repository. For example, a ``readonly`` feature may denote - the repository as read-only. Or a ``revlogfilestore`` feature may - denote that the repository is using revlogs for file storage. - - The intent of features is to provide a machine-queryable mechanism - for repo consumers to test for various repository characteristics. - - Features are similar to ``requirements``. The main difference is that - requirements are stored on-disk and represent requirements to open the - repository. Features are more run-time capabilities of the repository - and more granular capabilities (which may be derived from requirements). - """) - - filtername = interfaceutil.Attribute( - """Name of the repoview that is active on this repo.""") - - wvfs = interfaceutil.Attribute( - """VFS used to access the working directory.""") - - vfs = interfaceutil.Attribute( - """VFS rooted at the .hg directory. - - Used to access repository data not in the store. - """) - - svfs = interfaceutil.Attribute( - """VFS rooted at the store. - - Used to access repository data in the store. Typically .hg/store. - But can point elsewhere if the store is shared. - """) - - root = interfaceutil.Attribute( - """Path to the root of the working directory.""") - - path = interfaceutil.Attribute( - """Path to the .hg directory.""") - - origroot = interfaceutil.Attribute( - """The filesystem path that was used to construct the repo.""") - - auditor = interfaceutil.Attribute( - """A pathauditor for the working directory. - - This checks if a path refers to a nested repository. - - Operates on the filesystem. - """) - - nofsauditor = interfaceutil.Attribute( - """A pathauditor for the working directory. - - This is like ``auditor`` except it doesn't do filesystem checks. - """) - - baseui = interfaceutil.Attribute( - """Original ui instance passed into constructor.""") - - ui = interfaceutil.Attribute( - """Main ui instance for this instance.""") - - sharedpath = interfaceutil.Attribute( - """Path to the .hg directory of the repo this repo was shared from.""") - - store = interfaceutil.Attribute( - """A store instance.""") - - spath = interfaceutil.Attribute( - """Path to the store.""") - - sjoin = interfaceutil.Attribute( - """Alias to self.store.join.""") - - cachevfs = interfaceutil.Attribute( - """A VFS used to access the cache directory. - - Typically .hg/cache. - """) - - wcachevfs = interfaceutil.Attribute( - """A VFS used to access the cache directory dedicated to working copy - - Typically .hg/wcache. 
- """) - - filteredrevcache = interfaceutil.Attribute( - """Holds sets of revisions to be filtered.""") - - names = interfaceutil.Attribute( - """A ``namespaces`` instance.""") - - def close(): - """Close the handle on this repository.""" - - def peer(): - """Obtain an object conforming to the ``peer`` interface.""" - - def unfiltered(): - """Obtain an unfiltered/raw view of this repo.""" - - def filtered(name, visibilityexceptions=None): - """Obtain a named view of this repository.""" - - obsstore = interfaceutil.Attribute( - """A store of obsolescence data.""") - - changelog = interfaceutil.Attribute( - """A handle on the changelog revlog.""") - - manifestlog = interfaceutil.Attribute( - """An instance conforming to the ``imanifestlog`` interface. - - Provides access to manifests for the repository. - """) - - dirstate = interfaceutil.Attribute( - """Working directory state.""") - - narrowpats = interfaceutil.Attribute( - """Matcher patterns for this repository's narrowspec.""") - - def narrowmatch(match=None, includeexact=False): - """Obtain a matcher for the narrowspec.""" - - def setnarrowpats(newincludes, newexcludes): - """Define the narrowspec for this repository.""" - - def __getitem__(changeid): - """Try to resolve a changectx.""" - - def __contains__(changeid): - """Whether a changeset exists.""" - - def __nonzero__(): - """Always returns True.""" - return True - - __bool__ = __nonzero__ - - def __len__(): - """Returns the number of changesets in the repo.""" - - def __iter__(): - """Iterate over revisions in the changelog.""" - - def revs(expr, *args): - """Evaluate a revset. - - Emits revisions. - """ - - def set(expr, *args): - """Evaluate a revset. - - Emits changectx instances. - """ - - def anyrevs(specs, user=False, localalias=None): - """Find revisions matching one of the given revsets.""" - - def url(): - """Returns a string representing the location of this repo.""" - - def hook(name, throw=False, **args): - """Call a hook.""" - - def tags(): - """Return a mapping of tag to node.""" - - def tagtype(tagname): - """Return the type of a given tag.""" - - def tagslist(): - """Return a list of tags ordered by revision.""" - - def nodetags(node): - """Return the tags associated with a node.""" - - def nodebookmarks(node): - """Return the list of bookmarks pointing to the specified node.""" - - def branchmap(): - """Return a mapping of branch to heads in that branch.""" - - def revbranchcache(): - pass - - def branchtip(branchtip, ignoremissing=False): - """Return the tip node for a given branch.""" - - def lookup(key): - """Resolve the node for a revision.""" - - def lookupbranch(key): - """Look up the branch name of the given revision or branch name.""" - - def known(nodes): - """Determine whether a series of nodes is known. - - Returns a list of bools. 
- """ - - def local(): - """Whether the repository is local.""" - return True - - def publishing(): - """Whether the repository is a publishing repository.""" - - def cancopy(): - pass - - def shared(): - """The type of shared repository or None.""" - - def wjoin(f, *insidef): - """Calls self.vfs.reljoin(self.root, f, *insidef)""" - - def setparents(p1, p2): - """Set the parent nodes of the working directory.""" - - def filectx(path, changeid=None, fileid=None): - """Obtain a filectx for the given file revision.""" - - def getcwd(): - """Obtain the current working directory from the dirstate.""" - - def pathto(f, cwd=None): - """Obtain the relative path to a file.""" - - def adddatafilter(name, fltr): - pass - - def wread(filename): - """Read a file from wvfs, using data filters.""" - - def wwrite(filename, data, flags, backgroundclose=False, **kwargs): - """Write data to a file in the wvfs, using data filters.""" - - def wwritedata(filename, data): - """Resolve data for writing to the wvfs, using data filters.""" - - def currenttransaction(): - """Obtain the current transaction instance or None.""" - - def transaction(desc, report=None): - """Open a new transaction to write to the repository.""" - - def undofiles(): - """Returns a list of (vfs, path) for files to undo transactions.""" - - def recover(): - """Roll back an interrupted transaction.""" - - def rollback(dryrun=False, force=False): - """Undo the last transaction. - - DANGEROUS. - """ - - def updatecaches(tr=None, full=False): - """Warm repo caches.""" - - def invalidatecaches(): - """Invalidate cached data due to the repository mutating.""" - - def invalidatevolatilesets(): - pass - - def invalidatedirstate(): - """Invalidate the dirstate.""" - - def invalidate(clearfilecache=False): - pass - - def invalidateall(): - pass - - def lock(wait=True): - """Lock the repository store and return a lock instance.""" - - def wlock(wait=True): - """Lock the non-store parts of the repository.""" - - def currentwlock(): - """Return the wlock if it's held or None.""" - - def checkcommitpatterns(wctx, vdirs, match, status, fail): - pass - - def commit(text='', user=None, date=None, match=None, force=False, - editor=False, extra=None): - """Add a new revision to the repository.""" - - def commitctx(ctx, error=False, origctx=None): - """Commit a commitctx instance to the repository.""" - - def destroying(): - """Inform the repository that nodes are about to be destroyed.""" - - def destroyed(): - """Inform the repository that nodes have been destroyed.""" - - def status(node1='.', node2=None, match=None, ignored=False, - clean=False, unknown=False, listsubrepos=False): - """Convenience method to call repo[x].status().""" - - def addpostdsstatus(ps): - pass - - def postdsstatus(): - pass - - def clearpostdsstatus(): - pass - - def heads(start=None): - """Obtain list of nodes that are DAG heads.""" - - def branchheads(branch=None, start=None, closed=False): - pass - - def branches(nodes): - pass - - def between(pairs): - pass - - def checkpush(pushop): - pass - - prepushoutgoinghooks = interfaceutil.Attribute( - """util.hooks instance.""") - - def pushkey(namespace, key, old, new): - pass - - def listkeys(namespace): - pass - - def debugwireargs(one, two, three=None, four=None, five=None): - pass - - def savecommitmessage(text): - pass - -class completelocalrepository(ilocalrepositorymain, - ilocalrepositoryfilestorage): - """Complete interface for a local repository.""" - -class iwireprotocolcommandcacher(interfaceutil.Interface): - 
"""Represents a caching backend for wire protocol commands. - - Wire protocol version 2 supports transparent caching of many commands. - To leverage this caching, servers can activate objects that cache - command responses. Objects handle both cache writing and reading. - This interface defines how that response caching mechanism works. - - Wire protocol version 2 commands emit a series of objects that are - serialized and sent to the client. The caching layer exists between - the invocation of the command function and the sending of its output - objects to an output layer. - - Instances of this interface represent a binding to a cache that - can serve a response (in place of calling a command function) and/or - write responses to a cache for subsequent use. - - When a command request arrives, the following happens with regards - to this interface: - - 1. The server determines whether the command request is cacheable. - 2. If it is, an instance of this interface is spawned. - 3. The cacher is activated in a context manager (``__enter__`` is called). - 4. A cache *key* for that request is derived. This will call the - instance's ``adjustcachekeystate()`` method so the derivation - can be influenced. - 5. The cacher is informed of the derived cache key via a call to - ``setcachekey()``. - 6. The cacher's ``lookup()`` method is called to test for presence of - the derived key in the cache. - 7. If ``lookup()`` returns a hit, that cached result is used in place - of invoking the command function. ``__exit__`` is called and the instance - is discarded. - 8. The command function is invoked. - 9. ``onobject()`` is called for each object emitted by the command - function. - 10. After the final object is seen, ``onfinished()`` is called. - 11. ``__exit__`` is called to signal the end of use of the instance. - - Cache *key* derivation can be influenced by the instance. - - Cache keys are initially derived by a deterministic representation of - the command request. This includes the command name, arguments, protocol - version, etc. This initial key derivation is performed by CBOR-encoding a - data structure and feeding that output into a hasher. - - Instances of this interface can influence this initial key derivation - via ``adjustcachekeystate()``. - - The instance is informed of the derived cache key via a call to - ``setcachekey()``. The instance must store the key locally so it can - be consulted on subsequent operations that may require it. - - When constructed, the instance has access to a callable that can be used - for encoding response objects. This callable receives as its single - argument an object emitted by a command function. It returns an iterable - of bytes chunks representing the encoded object. Unless the cacher is - caching native Python objects in memory or has a way of reconstructing - the original Python objects, implementations typically call this function - to produce bytes from the output objects and then store those bytes in - the cache. When it comes time to re-emit those bytes, they are wrapped - in a ``wireprototypes.encodedresponse`` instance to tell the output - layer that they are pre-encoded. - - When receiving the objects emitted by the command function, instances - can choose what to do with those objects. The simplest thing to do is - re-emit the original objects. They will be forwarded to the output - layer and will be processed as if the cacher did not exist. 
- - Implementations could also choose to not emit objects - instead locally - buffering objects or their encoded representation. They could then emit - a single "coalesced" object when ``onfinished()`` is called. In - this way, the implementation would function as a filtering layer of - sorts. - - When caching objects, typically the encoded form of the object will - be stored. Keep in mind that if the original object is forwarded to - the output layer, it will need to be encoded there as well. For large - output, this redundant encoding could add overhead. Implementations - could wrap the encoded object data in ``wireprototypes.encodedresponse`` - instances to avoid this overhead. - """ - def __enter__(): - """Marks the instance as active. - - Should return self. - """ - - def __exit__(exctype, excvalue, exctb): - """Called when cacher is no longer used. - - This can be used by implementations to perform cleanup actions (e.g. - disconnecting network sockets, aborting a partially cached response. - """ - - def adjustcachekeystate(state): - """Influences cache key derivation by adjusting state to derive key. - - A dict defining the state used to derive the cache key is passed. - - Implementations can modify this dict to record additional state that - is wanted to influence key derivation. - - Implementations are *highly* encouraged to not modify or delete - existing keys. - """ - - def setcachekey(key): - """Record the derived cache key for this request. - - Instances may mutate the key for internal usage, as desired. e.g. - instances may wish to prepend the repo name, introduce path - components for filesystem or URL addressing, etc. Behavior is up to - the cache. - - Returns a bool indicating if the request is cacheable by this - instance. - """ - - def lookup(): - """Attempt to resolve an entry in the cache. - - The instance is instructed to look for the cache key that it was - informed about via the call to ``setcachekey()``. - - If there's no cache hit or the cacher doesn't wish to use the cached - entry, ``None`` should be returned. - - Else, a dict defining the cached result should be returned. The - dict may have the following keys: - - objs - An iterable of objects that should be sent to the client. That - iterable of objects is expected to be what the command function - would return if invoked or an equivalent representation thereof. - """ - - def onobject(obj): - """Called when a new object is emitted from the command function. - - Receives as its argument the object that was emitted from the - command function. - - This method returns an iterator of objects to forward to the output - layer. The easiest implementation is a generator that just - ``yield obj``. - """ - - def onfinished(): - """Called after all objects have been emitted from the command function. - - Implementations should return an iterator of objects to forward to - the output layer. - - This method can be a generator. - """
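For orientation, the cacher contract spelled out above (relocated in this
series to ``mercurial/interfaces/repository.py``) is small enough to satisfy
with an in-memory implementation. The sketch below is illustrative only: the
shared ``_cache`` dict and the construction-time ``encodefn`` wiring are
assumptions, while ``wireprototypes.encodedresponse`` comes straight from the
interface documentation::

    from mercurial import wireprototypes

    class memorycacher(object):
        """Illustrative cacher keeping encoded responses in a module dict."""

        _cache = {}  # hypothetical process-wide backing store

        def __init__(self, encodefn):
            # ``encodefn`` is the encoding callable described above.
            self.encodefn = encodefn
            self.key = None
            self.buffered = []

        def __enter__(self):
            return self

        def __exit__(self, exctype, excvalue, exctb):
            pass  # nothing to tear down for an in-memory cache

        def adjustcachekeystate(self, state):
            # only add state; existing keys should not be modified
            state[b'cacher'] = b'memory'

        def setcachekey(self, key):
            self.key = key
            return True  # every request is cacheable in this sketch

        def lookup(self):
            if self.key not in self._cache:
                return None
            # re-emit pre-encoded bytes so the output layer skips encoding
            cached = wireprototypes.encodedresponse(self._cache[self.key])
            return {'objs': [cached]}

        def onobject(self, obj):
            # buffer the encoded form for the cache, then forward the object
            self.buffered.extend(self.encodefn(obj))
            yield obj

        def onfinished(self):
            self._cache[self.key] = b''.join(self.buffered)
            return []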
--- a/mercurial/revlog.py Sat Sep 07 14:35:21 2019 +0100 +++ b/mercurial/revlog.py Mon Sep 09 17:26:17 2019 -0400 @@ -38,13 +38,6 @@ from .revlogutils.constants import ( FLAG_GENERALDELTA, FLAG_INLINE_DATA, - REVIDX_DEFAULT_FLAGS, - REVIDX_ELLIPSIS, - REVIDX_EXTSTORED, - REVIDX_FLAGS_ORDER, - REVIDX_ISCENSORED, - REVIDX_KNOWN_FLAGS, - REVIDX_RAWTEXT_CHANGING_FLAGS, REVLOGV0, REVLOGV1, REVLOGV1_FLAGS, @@ -54,6 +47,14 @@ REVLOG_DEFAULT_FORMAT, REVLOG_DEFAULT_VERSION, ) +from .revlogutils.flagutil import ( + REVIDX_DEFAULT_FLAGS, + REVIDX_ELLIPSIS, + REVIDX_EXTSTORED, + REVIDX_FLAGS_ORDER, + REVIDX_ISCENSORED, + REVIDX_RAWTEXT_CHANGING_FLAGS, +) from .thirdparty import ( attr, ) @@ -64,15 +65,18 @@ mdiff, policy, pycompat, - repository, templatefilters, util, ) +from .interfaces import ( + repository, + util as interfaceutil, +) from .revlogutils import ( deltas as deltautil, + flagutil, ) from .utils import ( - interfaceutil, storageutil, stringutil, ) @@ -94,7 +98,6 @@ REVIDX_EXTSTORED REVIDX_DEFAULT_FLAGS REVIDX_FLAGS_ORDER -REVIDX_KNOWN_FLAGS REVIDX_RAWTEXT_CHANGING_FLAGS parsers = policy.importmod(r'parsers') @@ -108,11 +111,6 @@ _maxinline = 131072 _chunksize = 1048576 -# Store flag processors (cf. 'addflagprocessor()' to register) -_flagprocessors = { - REVIDX_ISCENSORED: None, -} - # Flag processors for REVIDX_ELLIPSIS. def ellipsisreadprocessor(rl, text): return text, False @@ -129,45 +127,6 @@ ellipsisrawprocessor, ) -def addflagprocessor(flag, processor): - """Register a flag processor on a revision data flag. - - Invariant: - - Flags need to be defined in REVIDX_KNOWN_FLAGS and REVIDX_FLAGS_ORDER, - and REVIDX_RAWTEXT_CHANGING_FLAGS if they can alter rawtext. - - Only one flag processor can be registered on a specific flag. - - flagprocessors must be 3-tuples of functions (read, write, raw) with the - following signatures: - - (read) f(self, rawtext) -> text, bool - - (write) f(self, text) -> rawtext, bool - - (raw) f(self, rawtext) -> bool - "text" is presented to the user. "rawtext" is stored in revlog data, not - directly visible to the user. - The boolean returned by these transforms is used to determine whether - the returned text can be used for hash integrity checking. For example, - if "write" returns False, then "text" is used to generate hash. If - "write" returns True, that basically means "rawtext" returned by "write" - should be used to generate hash. Usually, "write" and "read" return - different booleans. And "raw" returns a same boolean as "write". - - Note: The 'raw' transform is used for changegroup generation and in some - debug commands. In this case the transform only indicates whether the - contents can be used for hash integrity checks. 
- """ - _insertflagprocessor(flag, processor, _flagprocessors) - -def _insertflagprocessor(flag, processor, flagprocessors): - if not flag & REVIDX_KNOWN_FLAGS: - msg = _("cannot register processor on unknown flag '%#x'.") % (flag) - raise error.ProgrammingError(msg) - if flag not in REVIDX_FLAGS_ORDER: - msg = _("flag '%#x' undefined in REVIDX_FLAGS_ORDER.") % (flag) - raise error.ProgrammingError(msg) - if flag in flagprocessors: - msg = _("cannot register multiple processors on flag '%#x'.") % (flag) - raise error.Abort(msg) - flagprocessors[flag] = processor - def getoffset(q): return int(q >> 16) @@ -175,7 +134,7 @@ return int(q & 0xFFFF) def offset_type(offset, type): - if (type & ~REVIDX_KNOWN_FLAGS) != 0: + if (type & ~flagutil.REVIDX_KNOWN_FLAGS) != 0: raise ValueError('unknown revlog index flags') return int(int(offset) << 16 | type) @@ -302,7 +261,7 @@ p = versionformat_pack(version) + p[4:] return p -class revlog(object): +class revlog(flagutil.flagprocessorsmixin): """ the underlying revision storage object @@ -384,7 +343,7 @@ # Make copy of flag processors so each revlog instance can support # custom flags. - self._flagprocessors = dict(_flagprocessors) + self._flagprocessors = dict(flagutil.flagprocessors) # 2-tuple of file handles being used for active writing. self._writinghandles = None @@ -442,7 +401,7 @@ # revlog v0 doesn't have flag processors for flag, processor in opts.get(b'flagprocessors', {}).iteritems(): - _insertflagprocessor(flag, processor, self._flagprocessors) + flagutil.insertflagprocessor(flag, processor, self._flagprocessors) if self._chunkcachesize <= 0: raise error.RevlogError(_('revlog chunk cache size %r is not ' @@ -679,7 +638,7 @@ if l >= 0: return l - t = self.revision(rev, raw=True) + t = self.rawdata(rev) return len(t) def size(self, rev): @@ -687,7 +646,7 @@ # fast path: if no "read" flag processor could change the content, # size is rawsize. note: ELLIPSIS is known to not change the content. flags = self.flags(rev) - if flags & (REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0: + if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0: return self.rawsize(rev) return len(self.revision(rev, raw=False)) @@ -1639,8 +1598,8 @@ if rev1 != nullrev and self.deltaparent(rev2) == rev1: return bytes(self._chunk(rev2)) - return mdiff.textdiff(self.revision(rev1, raw=True), - self.revision(rev2, raw=True)) + return mdiff.textdiff(self.rawdata(rev1), + self.rawdata(rev2)) def revision(self, nodeorrev, _df=None, raw=False): """return an uncompressed revision of a given node or revision @@ -1651,6 +1610,14 @@ treated as raw data when applying flag transforms. 'raw' should be set to True when generating changegroups or in debug commands. """ + if raw: + msg = ('revlog.revision(..., raw=True) is deprecated, ' + 'use revlog.rawdata(...)') + util.nouideprecwarn(msg, '5.2', stacklevel=2) + return self._revisiondata(nodeorrev, _df, raw=raw) + + def _revisiondata(self, nodeorrev, _df=None, raw=False): + # deal with <nodeorrev> argument type if isinstance(nodeorrev, int): rev = nodeorrev node = self.node(rev) @@ -1658,64 +1625,91 @@ node = nodeorrev rev = None - cachedrev = None - flags = None - rawtext = None + # fast path the special `nullid` rev if node == nullid: return "" + + # The text as stored inside the revlog. Might be the revision or might + # need to be processed to retrieve the revision. 
+        rawtext = None
+
+        rev, rawtext, validated = self._rawtext(node, rev, _df=_df)
+
+        if raw and validated:
+            # if we don't want to process the raw text and that raw
+            # text is cached, we can exit early.
+            return rawtext
+        if rev is None:
+            rev = self.rev(node)
+        # the revlog's flags for this revision
+        # (they usually alter how the content is processed)
+        flags = self.flags(rev)
+
+        if validated and flags == REVIDX_DEFAULT_FLAGS:
+            # no extra flags set, no flag processor runs, text = rawtext
+            return rawtext
+
+        if raw:
+            validatehash = self._processflagsraw(rawtext, flags)
+            text = rawtext
+        else:
+            text, validatehash = self._processflagsread(rawtext, flags)
+        if validatehash:
+            self.checkhash(text, node, rev=rev)
+        if not validated:
+            self._revisioncache = (node, rev, rawtext)
+
+        return text
+
+    def _rawtext(self, node, rev, _df=None):
+        """return the possibly unvalidated rawtext for a revision
+
+        returns (rev, rawtext, validated)
+        """
+
+        # revision in the cache (could be useful to apply delta)
+        cachedrev = None
+        # An intermediate text to apply deltas to
+        basetext = None
+
+        # Check if we have the entry in cache
+        # The cache entry looks like (node, rev, rawtext)
         if self._revisioncache:
             if self._revisioncache[0] == node:
-                # _cache only stores rawtext
-                if raw:
-                    return self._revisioncache[2]
-                # duplicated, but good for perf
-                if rev is None:
-                    rev = self.rev(node)
-                if flags is None:
-                    flags = self.flags(rev)
-                # no extra flags set, no flag processor runs, text = rawtext
-                if flags == REVIDX_DEFAULT_FLAGS:
-                    return self._revisioncache[2]
-                # rawtext is reusable. need to run flag processor
-                rawtext = self._revisioncache[2]
-
+                return (rev, self._revisioncache[2], True)
             cachedrev = self._revisioncache[1]

-        # look up what we need to read
-        if rawtext is None:
-            if rev is None:
-                rev = self.rev(node)
-
-            chain, stopped = self._deltachain(rev, stoprev=cachedrev)
-            if stopped:
-                rawtext = self._revisioncache[2]
-
-            # drop cache to save memory
-            self._revisioncache = None
-
-            targetsize = None
-            rawsize = self.index[rev][2]
-            if 0 <= rawsize:
-                targetsize = 4 * rawsize
-
-            bins = self._chunks(chain, df=_df, targetsize=targetsize)
-            if rawtext is None:
-                rawtext = bytes(bins[0])
-                bins = bins[1:]
-
-            rawtext = mdiff.patches(rawtext, bins)
-            self._revisioncache = (node, rev, rawtext)
-
-        if flags is None:
-            if rev is None:
-                rev = self.rev(node)
-            flags = self.flags(rev)
-
-        text, validatehash = self._processflags(rawtext, flags, 'read', raw=raw)
-        if validatehash:
-            self.checkhash(text, node, rev=rev)
-
-        return text
+        if rev is None:
+            rev = self.rev(node)
+
+        chain, stopped = self._deltachain(rev, stoprev=cachedrev)
+        if stopped:
+            basetext = self._revisioncache[2]
+
+        # drop cache to save memory, the caller is expected to
+        # update self._revisioncache after validating the text
+        self._revisioncache = None
+
+        targetsize = None
+        rawsize = self.index[rev][2]
+        if 0 <= rawsize:
+            targetsize = 4 * rawsize
+
+        bins = self._chunks(chain, df=_df, targetsize=targetsize)
+        if basetext is None:
+            basetext = bytes(bins[0])
+            bins = bins[1:]
+
+        rawtext = mdiff.patches(basetext, bins)
+        del basetext # let us have a chance to free memory early
+        return (rev, rawtext, False)
+
+    def rawdata(self, nodeorrev, _df=None):
+        """return the uncompressed raw data of a given node or revision
+        number.
+
+        _df - an existing file handle to read from. (internal-only)
+        """
+        return self._revisiondata(nodeorrev, _df, raw=True)

     def hash(self, text, p1, p2):
         """Compute a node hash. 
@@ -1725,69 +1719,6 @@ """ return storageutil.hashrevisionsha1(text, p1, p2) - def _processflags(self, text, flags, operation, raw=False): - """Inspect revision data flags and applies transforms defined by - registered flag processors. - - ``text`` - the revision data to process - ``flags`` - the revision flags - ``operation`` - the operation being performed (read or write) - ``raw`` - an optional argument describing if the raw transform should be - applied. - - This method processes the flags in the order (or reverse order if - ``operation`` is 'write') defined by REVIDX_FLAGS_ORDER, applying the - flag processors registered for present flags. The order of flags defined - in REVIDX_FLAGS_ORDER needs to be stable to allow non-commutativity. - - Returns a 2-tuple of ``(text, validatehash)`` where ``text`` is the - processed text and ``validatehash`` is a bool indicating whether the - returned text should be checked for hash integrity. - - Note: If the ``raw`` argument is set, it has precedence over the - operation and will only update the value of ``validatehash``. - """ - # fast path: no flag processors will run - if flags == 0: - return text, True - if not operation in ('read', 'write'): - raise error.ProgrammingError(_("invalid '%s' operation") % - operation) - # Check all flags are known. - if flags & ~REVIDX_KNOWN_FLAGS: - raise error.RevlogError(_("incompatible revision flag '%#x'") % - (flags & ~REVIDX_KNOWN_FLAGS)) - validatehash = True - # Depending on the operation (read or write), the order might be - # reversed due to non-commutative transforms. - orderedflags = REVIDX_FLAGS_ORDER - if operation == 'write': - orderedflags = reversed(orderedflags) - - for flag in orderedflags: - # If a flagprocessor has been registered for a known flag, apply the - # related operation transform and update result tuple. - if flag & flags: - vhash = True - - if flag not in self._flagprocessors: - message = _("missing processor for flag '%#x'") % (flag) - raise error.RevlogError(message) - - processor = self._flagprocessors[flag] - if processor is not None: - readtransform, writetransform, rawtransform = processor - - if raw: - vhash = rawtransform(self, text) - elif operation == 'read': - text, vhash = readtransform(self, text) - else: # write operation - text, vhash = writetransform(self, text) - validatehash = validatehash and vhash - - return text, validatehash - def checkhash(self, text, node, p1=None, p2=None, rev=None): """Check node hash integrity. @@ -1896,7 +1827,7 @@ if flags: node = node or self.hash(text, p1, p2) - rawtext, validatehash = self._processflags(text, flags, 'write') + rawtext, validatehash = self._processflagswrite(text, flags) # If the flag processor modifies the revision data, ignore any provided # cachedelta. @@ -2461,13 +2392,14 @@ # the revlog chunk is a delta. cachedelta = None rawtext = None - if destrevlog._lazydelta: + if (deltareuse != self.DELTAREUSEFULLADD + and destrevlog._lazydelta): dp = self.deltaparent(rev) if dp != nullrev: cachedelta = (dp, bytes(self._chunk(rev))) if not cachedelta: - rawtext = self.revision(rev, raw=True) + rawtext = self.rawdata(rev) if deltareuse == self.DELTAREUSEFULLADD: @@ -2545,7 +2477,7 @@ 'revision having delta stored')) rawtext = self._chunk(rev) else: - rawtext = self.revision(rev, raw=True) + rawtext = self.rawdata(rev) newrl.addrawrevision(rawtext, tr, self.linkrev(rev), p1, p2, node, self.flags(rev)) @@ -2603,8 +2535,8 @@ # rawtext[0:2]=='\1\n'| False | True | True | ? 
# # "rawtext" means the raw text stored in revlog data, which - # could be retrieved by "revision(rev, raw=True)". "text" - # mentioned below is "revision(rev, raw=False)". + # could be retrieved by "rawdata(rev)". "text" + # mentioned below is "revision(rev)". # # There are 3 different lengths stored physically: # 1. L1: rawsize, stored in revlog index @@ -2614,7 +2546,7 @@ # # L1 should be equal to L2. L3 could be different from them. # "text" may or may not affect commit hash depending on flag - # processors (see revlog.addflagprocessor). + # processors (see flagutil.addflagprocessor). # # | common | rename | meta | ext # ------------------------------------------------- @@ -2646,7 +2578,7 @@ self.revision(node) l1 = self.rawsize(rev) - l2 = len(self.revision(node, raw=True)) + l2 = len(self.rawdata(node)) if l1 != l2: yield revlogproblem(
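The caller-visible summary of the revlog changes above: ``revision(...,
raw=True)`` still works but now issues a deprecation warning, and
``rawdata()`` is the supported replacement. A minimal migration sketch,
assuming an existing local repository (the path and filename are
hypothetical)::

    from mercurial import hg, ui as uimod

    repo = hg.repository(uimod.ui.load(), b'/path/to/repo')  # hypothetical
    fl = repo.file(b'README')  # any tracked file

    node = fl.node(0)
    text = fl.revision(node)  # flag processors applied ("read" transform)
    raw = fl.rawdata(node)    # bytes as stored in the revlog, unprocessed
    # for a revision with no flags set, both calls return the same bytes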
--- a/mercurial/revlogutils/constants.py Sat Sep 07 14:35:21 2019 +0100 +++ b/mercurial/revlogutils/constants.py Mon Sep 09 17:26:17 2019 -0400 @@ -9,9 +9,8 @@ from __future__ import absolute_import -from .. import ( +from ..interfaces import ( repository, - util, ) # revlog header flags @@ -48,7 +47,7 @@ REVIDX_ELLIPSIS, REVIDX_EXTSTORED, ] -REVIDX_KNOWN_FLAGS = util.bitsfrom(REVIDX_FLAGS_ORDER) + # bitmark for flags that could cause rawdata content change REVIDX_RAWTEXT_CHANGING_FLAGS = REVIDX_ISCENSORED | REVIDX_EXTSTORED
--- a/mercurial/revlogutils/deltas.py Sat Sep 07 14:35:21 2019 +0100 +++ b/mercurial/revlogutils/deltas.py Mon Sep 09 17:26:17 2019 -0400 @@ -521,8 +521,7 @@ fulltext = mdiff.patch(basetext, delta) try: - res = revlog._processflags(fulltext, flags, 'read', raw=True) - fulltext, validatehash = res + validatehash = revlog._processflagsraw(fulltext, flags) if validatehash: revlog.checkhash(fulltext, expectednode, p1=p1, p2=p2) if flags & REVIDX_ISCENSORED: @@ -925,7 +924,7 @@ header = mdiff.replacediffheader(revlog.rawsize(base), len(t)) delta = header + t else: - ptext = revlog.revision(base, _df=fh, raw=True) + ptext = revlog.rawdata(base, _df=fh) delta = mdiff.textdiff(ptext, t) return delta
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/revlogutils/flagutil.py	Mon Sep 09 17:26:17 2019 -0400
@@ -0,0 +1,199 @@
+# flagutil.py - code to deal with revlog flags and their processors
+#
+# Copyright 2016 Remi Chaintron <remi@fb.com>
+# Copyright 2016-2019 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+from ..i18n import _
+
+from .constants import (
+    REVIDX_DEFAULT_FLAGS,
+    REVIDX_ELLIPSIS,
+    REVIDX_EXTSTORED,
+    REVIDX_FLAGS_ORDER,
+    REVIDX_ISCENSORED,
+    REVIDX_RAWTEXT_CHANGING_FLAGS,
+)
+
+from .. import (
+    error,
+    util
+)
+
+# blanked usage of all the names to prevent pyflakes warnings
+# We need these names available in the module for extensions.
+REVIDX_ISCENSORED
+REVIDX_ELLIPSIS
+REVIDX_EXTSTORED
+REVIDX_DEFAULT_FLAGS
+REVIDX_FLAGS_ORDER
+REVIDX_RAWTEXT_CHANGING_FLAGS
+
+REVIDX_KNOWN_FLAGS = util.bitsfrom(REVIDX_FLAGS_ORDER)
+
+# Store flag processors (cf. 'addflagprocessor()' to register)
+flagprocessors = {
+    REVIDX_ISCENSORED: None,
+}
+
+def addflagprocessor(flag, processor):
+    """Register a flag processor on a revision data flag.
+
+    Invariant:
+    - Flags need to be defined in REVIDX_KNOWN_FLAGS and REVIDX_FLAGS_ORDER,
+      and REVIDX_RAWTEXT_CHANGING_FLAGS if they can alter rawtext.
+    - Only one flag processor can be registered on a specific flag.
+    - flagprocessors must be 3-tuples of functions (read, write, raw) with the
+      following signatures:
+          - (read)  f(self, rawtext) -> text, bool
+          - (write) f(self, text) -> rawtext, bool
+          - (raw)   f(self, rawtext) -> bool
+      "text" is presented to the user. "rawtext" is stored in revlog data, not
+      directly visible to the user.
+      The boolean returned by these transforms is used to determine whether
+      the returned text can be used for hash integrity checking. For example,
+      if "write" returns False, then "text" is used to generate the hash. If
+      "write" returns True, the "rawtext" returned by "write" should be used
+      to generate the hash. Usually, "write" and "read" return different
+      booleans, and "raw" returns the same boolean as "write".
+
+    Note: The 'raw' transform is used for changegroup generation and in some
+    debug commands. In this case the transform only indicates whether the
+    contents can be used for hash integrity checks.
+    """
+    insertflagprocessor(flag, processor, flagprocessors)
+
+def insertflagprocessor(flag, processor, flagprocessors):
+    if not flag & REVIDX_KNOWN_FLAGS:
+        msg = _("cannot register processor on unknown flag '%#x'.") % (flag)
+        raise error.ProgrammingError(msg)
+    if flag not in REVIDX_FLAGS_ORDER:
+        msg = _("flag '%#x' undefined in REVIDX_FLAGS_ORDER.") % (flag)
+        raise error.ProgrammingError(msg)
+    if flag in flagprocessors:
+        msg = _("cannot register multiple processors on flag '%#x'.") % (flag)
+        raise error.Abort(msg)
+    flagprocessors[flag] = processor
+
+class flagprocessorsmixin(object):
+    """basic mixin to support revlog flag processing
+
+    Make sure the `_flagprocessors` attribute is set at ``__init__`` time.
+
+    See the documentation of the ``_processflags`` method for details.
+    """
+
+    _flagserrorclass = error.RevlogError
+
+    def _processflags(self, text, flags, operation, raw=False):
+        """deprecated entry point to access flag processors"""
+        msg = ('_processflag(...) 
use the specialized variant')
+        util.nouideprecwarn(msg, '5.2', stacklevel=2)
+        if raw:
+            return text, self._processflagsraw(text, flags)
+        elif operation == 'read':
+            return self._processflagsread(text, flags)
+        else: # write operation
+            return self._processflagswrite(text, flags)
+
+    def _processflagsread(self, text, flags):
+        """Inspect revision data flags and apply the read transformations
+        defined by registered flag processors.
+
+        ``text`` - the revision data to process
+        ``flags`` - the revision flags
+
+        This method processes the flags in the order defined by
+        REVIDX_FLAGS_ORDER, applying the flag processors registered for the
+        flags that are set. The order of flags defined in REVIDX_FLAGS_ORDER
+        needs to be stable to allow non-commutativity.
+
+        Returns a 2-tuple of ``(text, validatehash)`` where ``text`` is the
+        processed text and ``validatehash`` is a bool indicating whether the
+        returned text should be checked for hash integrity.
+        """
+        return self._processflagsfunc(text, flags, 'read')
+
+    def _processflagswrite(self, text, flags):
+        """Inspect revision data flags and apply the write transformations
+        defined by registered flag processors.
+
+        ``text`` - the revision data to process
+        ``flags`` - the revision flags
+
+        This method processes the flags in the reverse of the order defined
+        by REVIDX_FLAGS_ORDER (the transforms are not commutative, so writing
+        must undo them in reverse), applying the flag processors registered
+        for the flags that are set.
+
+        Returns a 2-tuple of ``(text, validatehash)`` where ``text`` is the
+        processed text and ``validatehash`` is a bool indicating whether the
+        returned text should be checked for hash integrity.
+        """
+        return self._processflagsfunc(text, flags, 'write')
+
+    def _processflagsraw(self, text, flags):
+        """Inspect revision data flags to check if the content hash should be
+        validated.
+
+        ``text`` - the revision data to process
+        ``flags`` - the revision flags
+
+        This method processes the flags in the order defined by
+        REVIDX_FLAGS_ORDER, consulting the flag processors registered for the
+        flags that are set.
+
+        Returns a bool indicating whether the raw text should be checked for
+        hash integrity.
+        """
+        return self._processflagsfunc(text, flags, 'raw')[1]
+
+    def _processflagsfunc(self, text, flags, operation):
+        # fast path: no flag processors will run
+        if flags == 0:
+            return text, True
+        if operation not in ('read', 'write', 'raw'):
+            raise error.ProgrammingError(_("invalid '%s' operation") %
+                                         operation)
+        # Check all flags are known.
+        if flags & ~REVIDX_KNOWN_FLAGS:
+            raise self._flagserrorclass(_("incompatible revision flag '%#x'") %
+                                        (flags & ~REVIDX_KNOWN_FLAGS))
+        validatehash = True
+        # Depending on the operation (read or write), the order might be
+        # reversed due to non-commutative transforms.
+        orderedflags = REVIDX_FLAGS_ORDER
+        if operation == 'write':
+            orderedflags = reversed(orderedflags)
+
+        for flag in orderedflags:
+            # If a flagprocessor has been registered for a known flag, apply the
+            # related operation transform and update result tuple.
+ if flag & flags: + vhash = True + + if flag not in self._flagprocessors: + message = _("missing processor for flag '%#x'") % (flag) + raise self._flagserrorclass(message) + + processor = self._flagprocessors[flag] + if processor is not None: + readtransform, writetransform, rawtransform = processor + + if operation == 'raw': + vhash = rawtransform(self, text) + elif operation == 'read': + text, vhash = readtransform(self, text) + else: # write operation + text, vhash = writetransform(self, text) + validatehash = validatehash and vhash + + return text, validatehash
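Extensions that previously called ``revlog.addflagprocessor()`` now register
through this module. A hedged sketch of such a registration, with made-up
transforms on ``REVIDX_EXTSTORED`` purely to show the (read, write, raw)
3-tuple contract documented above::

    from mercurial.revlogutils import flagutil

    def readtransform(rl, rawtext):
        # stored form -> user-visible text; True: result may be hash-checked
        return rawtext[len(b'ext:'):], True

    def writetransform(rl, text):
        # user-visible text -> stored form; False: hash the user-visible text
        return b'ext:' + text, False

    def rawtransform(rl, rawtext):
        # the stored form alone cannot be used for hash integrity checks
        return False

    flagutil.addflagprocessor(
        flagutil.REVIDX_EXTSTORED,
        (readtransform, writetransform, rawtransform),
    )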
--- a/mercurial/revset.py Sat Sep 07 14:35:21 2019 +0100 +++ b/mercurial/revset.py Mon Sep 09 17:26:17 2019 -0400 @@ -1695,7 +1695,7 @@ parent. (EXPERIMENTAL) """ if x is None: - stacks = stackmod.getstack(repo, x) + stacks = stackmod.getstack(repo) else: stacks = smartset.baseset([]) for revision in getset(repo, fullreposet(repo), x):
--- a/mercurial/scmutil.py Sat Sep 07 14:35:21 2019 +0100 +++ b/mercurial/scmutil.py Mon Sep 09 17:26:17 2019 -0400 @@ -1762,10 +1762,27 @@ categories.append(newcat) return wrapped + + @reportsummary + def reportchangegroup(repo, tr): + cgchangesets = tr.changes.get('changegroup-count-changesets', 0) + cgrevisions = tr.changes.get('changegroup-count-revisions', 0) + cgfiles = tr.changes.get('changegroup-count-files', 0) + cgheads = tr.changes.get('changegroup-count-heads', 0) + if cgchangesets or cgrevisions or cgfiles: + htext = "" + if cgheads: + htext = _(" (%+d heads)") % cgheads + msg = _("added %d changesets with %d changes to %d files%s\n") + repo.ui.status(msg % (cgchangesets, cgrevisions, cgfiles, htext)) + if txmatch(_reportobsoletedsource): @reportsummary def reportobsoleted(repo, tr): obsoleted = obsutil.getobsoleted(repo, tr) + newmarkers = len(tr.changes.get('obsmarkers', ())) + if newmarkers: + repo.ui.status(_('%i new obsolescence markers\n') % newmarkers) if obsoleted: repo.ui.status(_('obsoleted %i changesets\n') % len(obsoleted)) @@ -1984,3 +2001,21 @@ "ancestors(head() and not bookmark(%s)) - " "ancestors(bookmark() and not bookmark(%s))", mark, mark, mark) + +def computechangesetfilesadded(ctx): + """return the list of files added in a changeset + """ + added = [] + for f in ctx.files(): + if not any(f in p for p in ctx.parents()): + added.append(f) + return added + +def computechangesetfilesremoved(ctx): + """return the list of files removed in a changeset + """ + removed = [] + for f in ctx.files(): + if f not in ctx: + removed.append(f) + return removed
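Both new helpers derive their answer purely from a change context and its
parents. A usage sketch, assuming an open ``repo``::

    from mercurial import scmutil

    ctx = repo[b'tip']
    # files present in ctx but in none of its parents
    added = scmutil.computechangesetfilesadded(ctx)
    # files named by ctx.files() but no longer present in ctx
    removed = scmutil.computechangesetfilesremoved(ctx)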
--- a/mercurial/setdiscovery.py	Sat Sep 07 14:35:21 2019 +0100
+++ b/mercurial/setdiscovery.py	Mon Sep 09 17:26:17 2019 -0400
@@ -52,6 +52,7 @@
 )
 from . import (
     error,
+    policy,
     util,
 )
@@ -92,11 +93,19 @@
         dist.setdefault(p, d + 1)
         visit.append(p)

-def _limitsample(sample, desiredlen):
-    """return a random subset of sample of at most desiredlen item"""
-    if len(sample) > desiredlen:
-        sample = set(random.sample(sample, desiredlen))
-    return sample
+def _limitsample(sample, desiredlen, randomize=True):
+    """return a random subset of sample of at most desiredlen items.
+
+    If randomize is False, though, a deterministic subset is returned.
+    This is meant for integration tests.
+    """
+    if len(sample) <= desiredlen:
+        return sample
+    if randomize:
+        return set(random.sample(sample, desiredlen))
+    sample = list(sample)
+    sample.sort()
+    return set(sample[:desiredlen])

 class partialdiscovery(object):
     """an object representing ongoing discovery
@@ -110,7 +119,7 @@
     (all tracked revisions are known locally)
     """

-    def __init__(self, repo, targetheads, respectsize):
+    def __init__(self, repo, targetheads, respectsize, randomize=True):
         self._repo = repo
         self._targetheads = targetheads
         self._common = repo.changelog.incrementalmissingrevs()
@@ -118,6 +127,7 @@
         self.missing = set()
         self._childrenmap = None
         self._respectsize = respectsize
+        self.randomize = randomize

     def addcommons(self, commons):
         """register nodes known as common"""
@@ -222,7 +232,7 @@

         sample = set(self._repo.revs('heads(%ld)', revs))
         if len(sample) >= size:
-            return _limitsample(sample, size)
+            return _limitsample(sample, size, randomize=self.randomize)

         _updatesample(None, headrevs, sample, self._parentsgetter(),
                       quicksamplesize=size)
@@ -249,12 +259,21 @@
         if not self._respectsize:
             size = max(size, min(len(revsroots), len(revsheads)))

-        sample = _limitsample(sample, size)
+        sample = _limitsample(sample, size, randomize=self.randomize)
         if len(sample) < size:
             more = size - len(sample)
-            sample.update(random.sample(list(revs - sample), more))
+            takefrom = list(revs - sample)
+            if self.randomize:
+                sample.update(random.sample(takefrom, more))
+            else:
+                takefrom.sort()
+                sample.update(takefrom[:more])
         return sample

+partialdiscovery = policy.importrust(r'discovery',
+                                     member=r'PartialDiscovery',
+                                     default=partialdiscovery)
+
 def findcommonheads(ui, local, remote,
                     initialsamplesize=100,
                     fullsamplesize=200,
@@ -370,13 +389,15 @@
         return srvheadhashes, False, srvheadhashes

     if len(sample) == len(ownheads) and all(yesno):
-        ui.note(_("all local heads known remotely\n"))
+        ui.note(_("all local changesets known remotely\n"))
         ownheadhashes = [clnode(r) for r in ownheads]
         return ownheadhashes, True, srvheadhashes

     # full blown discovery
-    disco = partialdiscovery(local, ownheads, remote.limitedarguments)
+    randomize = ui.configbool('devel', 'discovery.randomize')
+    disco = partialdiscovery(local, ownheads, remote.limitedarguments,
+                             randomize=randomize)
     # treat remote heads (and maybe own heads) as a first implicit sample
     # response
     disco.addcommons(knownsrvheads)
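The ``randomize=False`` path exists so integration tests see reproducible
discovery exchanges; ``findcommonheads`` wires it to the
``devel.discovery.randomize`` config knob read above. The deterministic
branch simply sorts and slices::

    _limitsample({10, 3, 7, 5}, 2, randomize=False)  # always {3, 5}
    _limitsample({10, 3, 7, 5}, 2)                   # any 2-element subset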
--- a/mercurial/shelve.py Sat Sep 07 14:35:21 2019 +0100 +++ b/mercurial/shelve.py Mon Sep 09 17:26:17 2019 -0400 @@ -177,6 +177,7 @@ _nokeep = 'nokeep' # colon is essential to differentiate from a real bookmark name _noactivebook = ':no-active-bookmark' + _interactive = 'interactive' @classmethod def _verifyandtransform(cls, d): @@ -247,6 +248,7 @@ obj.activebookmark = '' if d.get('activebook', '') != cls._noactivebook: obj.activebookmark = d.get('activebook', '') + obj.interactive = d.get('interactive') == cls._interactive except (error.RepoLookupError, KeyError) as err: raise error.CorruptedState(pycompat.bytestr(err)) @@ -254,7 +256,7 @@ @classmethod def save(cls, repo, name, originalwctx, pendingctx, nodestoremove, - branchtorestore, keep=False, activebook=''): + branchtorestore, keep=False, activebook='', interactive=False): info = { "name": name, "originalwctx": nodemod.hex(originalwctx.node()), @@ -267,6 +269,8 @@ "keep": cls._keep if keep else cls._nokeep, "activebook": activebook or cls._noactivebook } + if interactive: + info['interactive'] = cls._interactive scmutil.simplekeyvaluefile( repo.vfs, cls._filename).write(info, firstline=("%d" % cls._version)) @@ -694,11 +698,12 @@ if shfile.exists(): shfile.movetobackup() cleanupoldbackups(repo) -def unshelvecontinue(ui, repo, state, opts, basename=None): +def unshelvecontinue(ui, repo, state, opts): """subcommand to continue an in-progress unshelve""" # We're finishing off a merge. First parent is our original # parent, second is the temporary "fake" commit we're unshelving. - interactive = opts.get('interactive') + interactive = state.interactive + basename = state.name with repo.lock(): checkparents(repo, state) ms = merge.mergestate.read(repo) @@ -721,15 +726,8 @@ with repo.ui.configoverride(overrides, 'unshelve'): with repo.dirstate.parentchange(): repo.setparents(state.parents[0], nodemod.nullid) - if not interactive: - ispartialunshelve = False - newnode = repo.commit(text=shelvectx.description(), - extra=shelvectx.extra(), - user=shelvectx.user(), - date=shelvectx.date()) - else: - newnode, ispartialunshelve = _dounshelveinteractive(ui, - repo, shelvectx, basename, opts) + newnode, ispartialunshelve = _createunshelvectx(ui, + repo, shelvectx, basename, interactive, opts) if newnode is None: # If it ended up being a no-op commit, then the normal @@ -749,11 +747,11 @@ mergefiles(ui, repo, state.wctx, shelvectx) restorebranch(ui, repo, state.branchtorestore) + if not phases.supportinternal(repo): + repair.strip(ui, repo, state.nodestoremove, backup=False, + topic='shelve') + shelvedstate.clear(repo) if not ispartialunshelve: - if not phases.supportinternal(repo): - repair.strip(ui, repo, state.nodestoremove, backup=False, - topic='shelve') - shelvedstate.clear(repo) unshelvecleanup(ui, repo, state.name, opts) _restoreactivebookmark(repo, state.activebookmark) ui.status(_("unshelve of '%s' complete\n") % state.name) @@ -804,14 +802,37 @@ return repo, shelvectx -def _dounshelveinteractive(ui, repo, shelvectx, basename, opts): - """The user might want to unshelve certain changes only from the stored - shelve. So, we would create two commits. One with requested changes to - unshelve at that time and the latter is shelved for future. +def _createunshelvectx(ui, repo, shelvectx, basename, interactive, opts): + """Handles the creation of unshelve commit and updates the shelve if it + was partially unshelved. + + If interactive is: + + * False: Commits all the changes in the working directory. 
+    * True: Prompts the user to select changes to unshelve and commit them.
+      Updates the shelve with the remaining changes.
+
+    Returns the node of the new commit created and a bool indicating whether
+    the shelve was only partially unshelved.
+
+    In interactive mode the user might want to unshelve only some of the
+    stored changes. In that case we create two commits: one with the changes
+    requested now, while the rest is shelved again for later.
     """
     opts['message'] = shelvectx.description()
     opts['interactive-unshelve'] = True
     pats = []
+    if not interactive:
+        newnode = repo.commit(text=shelvectx.description(),
+                              extra=shelvectx.extra(),
+                              user=shelvectx.user(),
+                              date=shelvectx.date())
+        return newnode, False
+
     commitfunc = getcommitfunc(shelvectx.extra(), interactive=True,
                                editor=True)
     newnode = cmdutil.dorecord(ui, repo, commitfunc, None, False,
@@ -819,10 +840,9 @@
                                **pycompat.strkwargs(opts))
     snode = repo.commit(text=shelvectx.description(),
                         extra=shelvectx.extra(),
-                        user=shelvectx.user(),
-                        date=shelvectx.date())
-    m = scmutil.matchfiles(repo, repo[snode].files())
+                        user=shelvectx.user())
     if snode:
+        m = scmutil.matchfiles(repo, repo[snode].files())
         _shelvecreatedcommit(repo, snode, basename, m)

     return newnode, bool(snode)
@@ -854,22 +874,16 @@
         nodestoremove = [repo.changelog.node(rev)
                          for rev in pycompat.xrange(oldtiprev, len(repo))]
         shelvedstate.save(repo, basename, pctx, tmpwctx, nodestoremove,
-                          branchtorestore, opts.get('keep'), activebookmark)
+                          branchtorestore, opts.get('keep'), activebookmark,
+                          interactive)
         raise error.InterventionRequired(
             _("unresolved conflicts (see 'hg resolve', then "
               "'hg unshelve --continue')"))

     with repo.dirstate.parentchange():
         repo.setparents(tmpwctx.node(), nodemod.nullid)
-        if not interactive:
-            ispartialunshelve = False
-            newnode = repo.commit(text=shelvectx.description(),
-                                  extra=shelvectx.extra(),
-                                  user=shelvectx.user(),
-                                  date=shelvectx.date())
-        else:
-            newnode, ispartialunshelve = _dounshelveinteractive(ui, repo,
-                shelvectx, basename, opts)
+        newnode, ispartialunshelve = _createunshelvectx(ui, repo,
+            shelvectx, basename, interactive, opts)

     if newnode is None:
         # If it ended up being a no-op commit, then the normal
@@ -928,7 +942,9 @@
     if opts.get("name"):
         shelved.append(opts["name"])

-    if abortf or continuef and not interactive:
+    if interactive and opts.get('keep'):
+        raise error.Abort(_('--keep on --interactive is not yet supported'))
+    if abortf or continuef:
         if abortf and continuef:
             raise error.Abort(_('cannot use both abort and continue'))
         if shelved:
@@ -940,6 +956,8 @@
         state = _loadshelvedstate(ui, repo, opts)
         if abortf:
             return unshelveabort(ui, repo, state)
+        elif continuef and interactive:
+            raise error.Abort(_('cannot use both continue and interactive'))
         elif continuef:
             return unshelvecontinue(ui, repo, state, opts)
         elif len(shelved) > 1:
@@ -950,11 +968,8 @@
             raise error.Abort(_('no shelved changes to apply!'))
         basename = util.split(shelved[0][1])[1]
         ui.status(_("unshelving change '%s'\n") % basename)
-    elif shelved:
+    else:
         basename = shelved[0]
-        if continuef and interactive:
-            state = _loadshelvedstate(ui, repo, opts)
-            return unshelvecontinue(ui, repo, state, opts, basename)

     if not shelvedfile(repo, basename, patchextension).exists():
         raise error.Abort(_("shelved change '%s' not found") % basename)

@@ -990,11 
+1005,10 @@ with ui.configoverride(overrides, 'unshelve'): mergefiles(ui, repo, pctx, shelvectx) restorebranch(ui, repo, branchtorestore) + shelvedstate.clear(repo) + _finishunshelve(repo, oldtiprev, tr, activebookmark) + _forgetunknownfiles(repo, shelvectx, addedbefore) if not ispartialunshelve: - _forgetunknownfiles(repo, shelvectx, addedbefore) - - shelvedstate.clear(repo) - _finishunshelve(repo, oldtiprev, tr, activebookmark) unshelvecleanup(ui, repo, basename, opts) finally: if tr:
--- a/mercurial/stack.py Sat Sep 07 14:35:21 2019 +0100 +++ b/mercurial/stack.py Mon Sep 09 17:26:17 2019 -0400 @@ -22,7 +22,7 @@ if rev is None: rev = '.' - revspec = 'reverse(only(%s) and not public() and not ::merge())' + revspec = 'only(%s) and not public() and not ::merge()' revset = revsetlang.formatspec(revspec, rev) revisions = scmutil.revrange(repo, [revset]) revisions.sort()
--- a/mercurial/statprof.py Sat Sep 07 14:35:21 2019 +0100 +++ b/mercurial/statprof.py Mon Sep 09 17:26:17 2019 -0400 @@ -236,18 +236,14 @@ def getsource(self, length): if self.source is None: lineno = self.lineno - 1 - fp = None try: - fp = open(self.path, 'rb') - for i, line in enumerate(fp): - if i == lineno: - self.source = line.strip() - break + with open(self.path, 'rb') as fp: + for i, line in enumerate(fp): + if i == lineno: + self.source = line.strip() + break except: pass - finally: - if fp: - fp.close() if self.source is None: self.source = '' @@ -733,10 +729,6 @@ fp.write(b'get it here: https://github.com/brendangregg/FlameGraph\n') return - fd, path = pycompat.mkstemp() - - file = open(path, "w+") - lines = {} for sample in data.samples: sites = [s.function for s in sample.stack] @@ -747,10 +739,11 @@ else: lines[line] = 1 - for line, count in lines.iteritems(): - file.write("%s %d\n" % (line, count)) + fd, path = pycompat.mkstemp() - file.close() + with open(path, "w+") as file: + for line, count in lines.iteritems(): + file.write("%s %d\n" % (line, count)) if outputfile is None: outputfile = '~/flamegraph.svg' @@ -766,7 +759,7 @@ if path in _pathcache: return _pathcache[path] - hgpath = pycompat.fsencode(encoding.__file__).rsplit(os.sep, 2)[0] + hgpath = encoding.__file__.rsplit(os.sep, 2)[0] for p in [hgpath] + sys.path: prefix = p + os.sep if path.startswith(prefix): @@ -814,7 +807,7 @@ parent = stackid(stack[1:]) myid = len(stack2id) stack2id[stack] = myid - id2stack.append(dict(category=stack[0][0], name='%s %s' % stack[0])) + id2stack.append(dict(category=stack[0][0], name=r'%s %s' % stack[0])) if parent is not None: id2stack[-1].update(parent=parent) return myid @@ -849,7 +842,7 @@ if minthreshold <= duration <= maxthreshold: # ensure no zero-duration events sampletime = max(oldtime + clamp, sample.time) - samples.append(dict(ph='E', name=oldfunc, cat=oldcat, sf=oldsid, + samples.append(dict(ph=r'E', name=oldfunc, cat=oldcat, sf=oldsid, ts=sampletime*1e6, pid=0)) else: blacklist.add(oldidx) @@ -858,8 +851,10 @@ # events given only stack snapshots. for sample in data.samples: - stack = tuple((('%s:%d' % (simplifypath(frame.path), frame.lineno), - frame.function) for frame in sample.stack)) + stack = tuple(((r'%s:%d' % (simplifypath(pycompat.sysstr(frame.path)), + frame.lineno), + pycompat.sysstr(frame.function)) + for frame in sample.stack)) qstack = collections.deque(stack) if laststack == qstack: continue @@ -873,15 +868,19 @@ laststack.appendleft(f) path, name = f sid = stackid(tuple(laststack)) - samples.append(dict(ph='B', name=name, cat=path, ts=sample.time*1e6, - sf=sid, pid=0)) + samples.append(dict(ph=r'B', name=name, cat=path, + ts=sample.time*1e6, sf=sid, pid=0)) laststack = collections.deque(stack) while laststack: poplast() - events = [s[1] for s in enumerate(samples) if s[0] not in blacklist] + events = [sample for idx, sample in enumerate(samples) + if idx not in blacklist] frames = collections.OrderedDict((str(k), v) for (k,v) in enumerate(id2stack)) - json.dump(dict(traceEvents=events, stackFrames=frames), fp, indent=1) + data = json.dumps(dict(traceEvents=events, stackFrames=frames), indent=1) + if not isinstance(data, bytes): + data = data.encode('utf-8') + fp.write(data) fp.write('\n') def printusage():
--- a/mercurial/store.py Sat Sep 07 14:35:21 2019 +0100 +++ b/mercurial/store.py Mon Sep 09 17:26:17 2019 -0400 @@ -15,7 +15,9 @@ from .i18n import _ from . import ( + changelog, error, + manifest, node, policy, pycompat, @@ -379,6 +381,14 @@ l.sort() return l + def changelog(self, trypending): + return changelog.changelog(self.vfs, trypending=trypending) + + def manifestlog(self, repo, storenarrowmatch): + rootstore = manifest.manifestrevlog(self.vfs) + return manifest.manifestlog( + self.vfs, repo, rootstore, storenarrowmatch) + def datafiles(self, matcher=None): return self._walk('data', True) + self._walk('meta', True)
--- a/mercurial/streamclone.py Sat Sep 07 14:35:21 2019 +0100 +++ b/mercurial/streamclone.py Mon Sep 09 17:26:17 2019 -0400 @@ -12,13 +12,15 @@ import struct from .i18n import _ +from .interfaces import ( + repository, +) from . import ( cacheutil, error, narrowspec, phases, pycompat, - repository, store, util, )
--- a/mercurial/testing/storage.py Sat Sep 07 14:35:21 2019 +0100 +++ b/mercurial/testing/storage.py Mon Sep 09 17:26:17 2019 -0400 @@ -17,6 +17,8 @@ from .. import ( error, mdiff, +) +from ..interfaces import ( repository, ) from ..utils import ( @@ -421,7 +423,7 @@ f.size(i) self.assertEqual(f.revision(nullid), b'') - self.assertEqual(f.revision(nullid, raw=True), b'') + self.assertEqual(f.rawdata(nullid), b'') with self.assertRaises(error.LookupError): f.revision(b'\x01' * 20) @@ -473,7 +475,7 @@ f.size(1) self.assertEqual(f.revision(node), fulltext) - self.assertEqual(f.revision(node, raw=True), fulltext) + self.assertEqual(f.rawdata(node), fulltext) self.assertEqual(f.read(node), fulltext) @@ -545,11 +547,11 @@ f.size(3) self.assertEqual(f.revision(node0), fulltext0) - self.assertEqual(f.revision(node0, raw=True), fulltext0) + self.assertEqual(f.rawdata(node0), fulltext0) self.assertEqual(f.revision(node1), fulltext1) - self.assertEqual(f.revision(node1, raw=True), fulltext1) + self.assertEqual(f.rawdata(node1), fulltext1) self.assertEqual(f.revision(node2), fulltext2) - self.assertEqual(f.revision(node2, raw=True), fulltext2) + self.assertEqual(f.rawdata(node2), fulltext2) with self.assertRaises(error.LookupError): f.revision(b'\x01' * 20) @@ -819,9 +821,9 @@ self.assertEqual(f.size(2), len(fulltext2)) self.assertEqual(f.revision(node1), stored1) - self.assertEqual(f.revision(node1, raw=True), stored1) + self.assertEqual(f.rawdata(node1), stored1) self.assertEqual(f.revision(node2), stored2) - self.assertEqual(f.revision(node2, raw=True), stored2) + self.assertEqual(f.rawdata(node2), stored2) self.assertEqual(f.read(node1), fulltext1) self.assertEqual(f.read(node2), fulltext2) @@ -862,10 +864,10 @@ self.assertEqual(f.size(1), len(fulltext1)) self.assertEqual(f.revision(node0), stored0) - self.assertEqual(f.revision(node0, raw=True), stored0) + self.assertEqual(f.rawdata(node0), stored0) self.assertEqual(f.revision(node1), stored1) - self.assertEqual(f.revision(node1, raw=True), stored1) + self.assertEqual(f.rawdata(node1), stored1) self.assertEqual(f.read(node0), fulltext0) self.assertEqual(f.read(node1), fulltext1) @@ -896,10 +898,10 @@ with self.assertRaises(error.StorageError): f.revision(node1) - # raw=True still verifies because there are no special storage + # rawdata() still verifies because there are no special storage # settings. with self.assertRaises(error.StorageError): - f.revision(node1, raw=True) + f.rawdata(node1) # read() behaves like revision(). with self.assertRaises(error.StorageError): @@ -909,7 +911,7 @@ # reading/validating the fulltext to return rename metadata. def testbadnoderevisionraw(self): - # Like above except we test revision(raw=True) first to isolate + # Like above except we test rawdata() first to isolate # revision caching behavior. 
f = self._makefilefn() @@ -924,10 +926,10 @@ rawtext=fulltext1) with self.assertRaises(error.StorageError): - f.revision(node1, raw=True) + f.rawdata(node1) with self.assertRaises(error.StorageError): - f.revision(node1, raw=True) + f.rawdata(node1) def testbadnoderevisionraw(self): # Like above except we test read() first to isolate revision caching @@ -1002,13 +1004,13 @@ f.revision(1) with self.assertRaises(error.CensoredNodeError): - f.revision(1, raw=True) + f.rawdata(1) with self.assertRaises(error.CensoredNodeError): f.read(1) def testcensoredrawrevision(self): - # Like above, except we do the revision(raw=True) request first to + # Like above, except we do the rawdata() request first to # isolate revision caching behavior. f = self._makefilefn() @@ -1027,7 +1029,7 @@ censored=True) with self.assertRaises(error.CensoredNodeError): - f.revision(1, raw=True) + f.rawdata(1) class ifilemutationtests(basetestcase): """Generic tests for the ifilemutation interface.
--- a/mercurial/ui.py Sat Sep 07 14:35:21 2019 +0100 +++ b/mercurial/ui.py Mon Sep 09 17:26:17 2019 -0400 @@ -783,6 +783,17 @@ return None return default + def configdefault(self, section, name): + """returns the default value of the config item""" + item = self._knownconfig.get(section, {}).get(name) + itemdefault = None + if item is not None: + if callable(item.default): + itemdefault = item.default() + else: + itemdefault = item.default + return itemdefault + def hasconfig(self, section, name, untrusted=False): return self._data(untrusted).hasitem(section, name)
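``configdefault`` answers "what would this item be if nothing configured
it?", invoking callable (dynamic) defaults so callers get a concrete value.
A small sketch, assuming ``ui`` is a live ``mercurial.ui.ui`` instance::

    ui.configdefault(b'format', b'usegeneraldelta')  # the registered default
    ui.configdefault(b'no-such-section', b'no-such-item')  # None if unknown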
--- a/mercurial/unionrepo.py Sat Sep 07 14:35:21 2019 +0100 +++ b/mercurial/unionrepo.py Mon Sep 09 17:26:17 2019 -0400 @@ -14,7 +14,6 @@ from __future__ import absolute_import from .i18n import _ -from .node import nullid from . import ( changelog, @@ -90,14 +89,11 @@ self.revlog2.rev(self.node(rev1)), self.revlog2.rev(self.node(rev2))) elif rev1 <= self.repotiprev and rev2 <= self.repotiprev: - return self.baserevdiff(rev1, rev2) - - return mdiff.textdiff(self.revision(rev1), self.revision(rev2)) + return super(unionrevlog, self).revdiff(rev1, rev2) - def revision(self, nodeorrev, _df=None, raw=False): - """return an uncompressed revision of a given node or revision - number. - """ + return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2)) + + def _revisiondata(self, nodeorrev, _df=None, raw=False): if isinstance(nodeorrev, int): rev = nodeorrev node = self.node(rev) @@ -105,26 +101,13 @@ node = nodeorrev rev = self.rev(node) - if node == nullid: - return "" - if rev > self.repotiprev: - text = self.revlog2.revision(node) - self._revisioncache = (node, rev, text) + # work around manifestrevlog NOT being a revlog + revlog2 = getattr(self.revlog2, '_revlog', self.revlog2) + func = revlog2._revisiondata else: - text = self.baserevision(rev) - # already cached - return text - - def baserevision(self, nodeorrev): - # Revlog subclasses may override 'revision' method to modify format of - # content retrieved from revlog. To use unionrevlog with such class one - # needs to override 'baserevision' and make more specific call here. - return revlog.revlog.revision(self, nodeorrev) - - def baserevdiff(self, rev1, rev2): - # Exists for the same purpose as baserevision. - return revlog.revlog.revdiff(self, rev1, rev2) + func = super(unionrevlog, self)._revisiondata + return func(node, _df=_df, raw=raw) def addrevision(self, text, transaction, link, p1=None, p2=None, d=None): raise NotImplementedError @@ -144,15 +127,6 @@ unionrevlog.__init__(self, opener, self.indexfile, changelog2, linkmapper) - def baserevision(self, nodeorrev): - # Although changelog doesn't override 'revision' method, some extensions - # may replace this class with another that does. Same story with - # manifest and filelog classes. - return changelog.changelog.revision(self, nodeorrev) - - def baserevdiff(self, rev1, rev2): - return changelog.changelog.revdiff(self, rev1, rev2) - class unionmanifest(unionrevlog, manifest.manifestrevlog): def __init__(self, opener, opener2, linkmapper): manifest.manifestrevlog.__init__(self, opener) @@ -160,12 +134,6 @@ unionrevlog.__init__(self, opener, self.indexfile, manifest2, linkmapper) - def baserevision(self, nodeorrev): - return manifest.manifestrevlog.revision(self, nodeorrev) - - def baserevdiff(self, rev1, rev2): - return manifest.manifestrevlog.revdiff(self, rev1, rev2) - class unionfilelog(filelog.filelog): def __init__(self, opener, path, opener2, linkmapper, repo): filelog.filelog.__init__(self, opener, path) @@ -176,12 +144,6 @@ self.repotiprev = self._revlog.repotiprev self.revlog2 = self._revlog.revlog2 - def baserevision(self, nodeorrev): - return filelog.filelog.revision(self, nodeorrev) - - def baserevdiff(self, rev1, rev2): - return filelog.filelog.revdiff(self, rev1, rev2) - def iscensored(self, rev): """Check if a revision is censored.""" if rev <= self.repotiprev:
--- a/mercurial/upgrade.py	Sat Sep 07 14:35:21 2019 +0100
+++ b/mercurial/upgrade.py	Mon Sep 09 17:26:17 2019 -0400
@@ -28,6 +28,12 @@
     compression,
 )

+# list of requirements that request a clone of all revlogs if added/removed
+RECLONES_REQUIREMENTS = {
+    'generaldelta',
+    localrepo.SPARSEREVLOG_REQUIREMENT,
+}
+
 def requiredsourcerequirements(repo):
     """Obtain requirements required to be present to upgrade a repo.
@@ -533,7 +539,55 @@
     #reverse of "/".join(("data", path + ".i"))
     return filelog.filelog(repo.svfs, path[5:-2])

-def _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse, forcedeltabothparents):
+def _copyrevlog(tr, destrepo, oldrl, unencodedname):
+    """copy all relevant files for `oldrl` into `destrepo` store
+
+    Files are copied "as is" without any transformation. The copy is performed
+    without extra checks. Callers are responsible for making sure the copied
+    content is compatible with the format of the destination repository.
+    """
+    oldrl = getattr(oldrl, '_revlog', oldrl)
+    newrl = _revlogfrompath(destrepo, unencodedname)
+    newrl = getattr(newrl, '_revlog', newrl)
+
+    oldvfs = oldrl.opener
+    newvfs = newrl.opener
+    oldindex = oldvfs.join(oldrl.indexfile)
+    newindex = newvfs.join(newrl.indexfile)
+    olddata = oldvfs.join(oldrl.datafile)
+    newdata = newvfs.join(newrl.datafile)
+
+    newdir = newvfs.dirname(newrl.indexfile)
+    newvfs.makedirs(newdir)
+
+    util.copyfile(oldindex, newindex)
+    if oldrl.opener.exists(olddata):
+        util.copyfile(olddata, newdata)
+
+    if not (unencodedname.endswith('00changelog.i')
+            or unencodedname.endswith('00manifest.i')):
+        destrepo.svfs.fncache.add(unencodedname)
+
+UPGRADE_CHANGELOG = object()
+UPGRADE_MANIFEST = object()
+UPGRADE_FILELOG = object()
+
+UPGRADE_ALL_REVLOGS = frozenset([UPGRADE_CHANGELOG,
+                                 UPGRADE_MANIFEST,
+                                 UPGRADE_FILELOG])
+
+def matchrevlog(revlogfilter, entry):
+    """check if a revlog is selected for cloning.
+
+    The store entry is checked against the passed filter"""
+    if entry.endswith('00changelog.i'):
+        return UPGRADE_CHANGELOG in revlogfilter
+    elif entry.endswith('00manifest.i'):
+        return UPGRADE_MANIFEST in revlogfilter
+    return UPGRADE_FILELOG in revlogfilter
+
+def _clonerevlogs(ui, srcrepo, dstrepo, tr, deltareuse, forcedeltabothparents,
+                  revlogs=UPGRADE_ALL_REVLOGS):
     """Copy revlogs between 2 repos."""
     revcount = 0
     srcsize = 0
@@ -554,9 +608,11 @@
     crawsize = 0
     cdstsize = 0

+    alldatafiles = list(srcrepo.store.walk())
+
     # Perform a pass to collect metadata. This validates we can open all
     # source files and allows a unified progress bar to be displayed.
-    for unencoded, encoded, size in srcrepo.store.walk():
+    for unencoded, encoded, size in alldatafiles:
         if unencoded.endswith('.d'):
             continue

@@ -607,12 +663,11 @@
     # Do the actual copying.
     # FUTURE this operation can be farmed off to worker processes. 
seen = set() - for unencoded, encoded, size in srcrepo.store.walk(): + for unencoded, encoded, size in alldatafiles: if unencoded.endswith('.d'): continue oldrl = _revlogfrompath(srcrepo, unencoded) - newrl = _revlogfrompath(dstrepo, unencoded) if isinstance(oldrl, changelog.changelog) and 'c' not in seen: ui.write(_('finished migrating %d manifest revisions across %d ' @@ -651,11 +706,19 @@ progress = srcrepo.ui.makeprogress(_('file revisions'), total=frevcount) + if matchrevlog(revlogs, unencoded): + ui.note(_('cloning %d revisions from %s\n') + % (len(oldrl), unencoded)) + newrl = _revlogfrompath(dstrepo, unencoded) + oldrl.clone(tr, newrl, addrevisioncb=oncopiedrevision, + deltareuse=deltareuse, + forcedeltabothparents=forcedeltabothparents) + else: + msg = _('blindly copying %s containing %i revisions\n') + ui.note(msg % (unencoded, len(oldrl))) + _copyrevlog(tr, dstrepo, oldrl, unencoded) - ui.note(_('cloning %d revisions from %s\n') % (len(oldrl), unencoded)) - oldrl.clone(tr, newrl, addrevisioncb=oncopiedrevision, - deltareuse=deltareuse, - forcedeltabothparents=forcedeltabothparents) + newrl = _revlogfrompath(dstrepo, unencoded) info = newrl.storageinfo(storedsize=True) datasize = info['storedsize'] or 0 @@ -715,7 +778,8 @@ before the new store is swapped into the original location. """ -def _upgraderepo(ui, srcrepo, dstrepo, requirements, actions): +def _upgraderepo(ui, srcrepo, dstrepo, requirements, actions, + revlogs=UPGRADE_ALL_REVLOGS): """Do the low-level work of upgrading a repository. The upgrade is effectively performed as a copy between a source @@ -743,8 +807,8 @@ deltareuse = revlog.revlog.DELTAREUSEALWAYS with dstrepo.transaction('upgrade') as tr: - _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse, - 're-delta-multibase' in actions) + _clonerevlogs(ui, srcrepo, dstrepo, tr, deltareuse, + 're-delta-multibase' in actions, revlogs=revlogs) # Now copy other files in the store directory. # The sorted() makes execution deterministic. @@ -806,13 +870,35 @@ return backuppath -def upgraderepo(ui, repo, run=False, optimize=None, backup=True): +def upgraderepo(ui, repo, run=False, optimize=None, backup=True, + manifest=None, changelog=None): """Upgrade a repository in place.""" if optimize is None: optimize = [] optimize = set(legacy_opts_map.get(o, o) for o in optimize) repo = repo.unfiltered() + revlogs = set(UPGRADE_ALL_REVLOGS) + specentries = (('c', changelog), ('m', manifest)) + specified = [(y, x) for (y, x) in specentries if x is not None] + if specified: + # we have some limitation on revlogs to be recloned + if any(x for y, x in specified): + revlogs = set() + for r, enabled in specified: + if enabled: + if r == 'c': + revlogs.add(UPGRADE_CHANGELOG) + elif r == 'm': + revlogs.add(UPGRADE_MANIFEST) + else: + # none are enabled + for r, __ in specified: + if r == 'c': + revlogs.discard(UPGRADE_CHANGELOG) + elif r == 'm': + revlogs.discard(UPGRADE_MANIFEST) + # Ensure the repository can be upgraded. 
missingreqs = requiredsourcerequirements(repo) - repo.requirements if missingreqs: @@ -872,6 +958,17 @@ # determineactions could have added optimisation if o not in actions) + removedreqs = repo.requirements - newreqs + addedreqs = newreqs - repo.requirements + + if revlogs != UPGRADE_ALL_REVLOGS: + incompatible = RECLONES_REQUIREMENTS & (removedreqs | addedreqs) + if incompatible: + msg = _('ignoring revlogs selection flags, format requirements ' + 'change: %s\n') + ui.warn(msg % ', '.join(sorted(incompatible))) + revlogs = UPGRADE_ALL_REVLOGS + def printrequirements(): ui.write(_('requirements\n')) ui.write(_(' preserved: %s\n') % @@ -962,7 +1059,7 @@ with dstrepo.wlock(), dstrepo.lock(): backuppath = _upgraderepo(ui, repo, dstrepo, newreqs, - upgradeactions) + upgradeactions, revlogs=revlogs) if not (backup or backuppath is None): ui.write(_('removing old repository content%s\n') % backuppath) repo.vfs.rmtree(backuppath, forcibly=True)
--- a/mercurial/util.py Sat Sep 07 14:35:21 2019 +0100 +++ b/mercurial/util.py Mon Sep 09 17:26:17 2019 -0400 @@ -53,7 +53,7 @@ stringutil, ) -rustdirs = policy.importrust('dirstate', 'Dirs') +rustdirs = policy.importrust(r'dirstate', r'Dirs') base85 = policy.importmod(r'base85') osutil = policy.importmod(r'osutil')
--- a/mercurial/utils/interfaceutil.py Sat Sep 07 14:35:21 2019 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,40 +0,0 @@ -# interfaceutil.py - Utilities for declaring interfaces. -# -# Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com> -# -# This software may be used and distributed according to the terms of the -# GNU General Public License version 2 or any later version. - -# zope.interface imposes a run-time cost due to module import overhead and -# bookkeeping for declaring interfaces. So, we use stubs for various -# zope.interface primitives unless instructed otherwise. - -from __future__ import absolute_import - -from .. import ( - encoding, -) - -if encoding.environ.get('HGREALINTERFACES'): - from ..thirdparty.zope import ( - interface as zi, - ) - - Attribute = zi.Attribute - Interface = zi.Interface - implementer = zi.implementer -else: - class Attribute(object): - def __init__(self, __name__, __doc__=''): - pass - - class Interface(object): - def __init__(self, name, bases=(), attrs=None, __doc__=None, - __module__=None): - pass - - def implementer(*ifaces): - def wrapper(cls): - return cls - - return wrapper
--- a/mercurial/utils/procutil.py Sat Sep 07 14:35:21 2019 +0100 +++ b/mercurial/utils/procutil.py Mon Sep 09 17:26:17 2019 -0400 @@ -245,8 +245,8 @@ pycompat.fsencode(getattr(mainmod, '__file__', ''))) == 'hg'): _sethgexecutable(pycompat.fsencode(mainmod.__file__)) else: - exe = findexe('hg') or os.path.basename(sys.argv[0]) - _sethgexecutable(exe) + _sethgexecutable(findexe('hg') or + os.path.basename(pycompat.sysargv[0])) return _hgexecutable def _sethgexecutable(path):
--- a/mercurial/utils/storageutil.py Sat Sep 07 14:35:21 2019 +0100 +++ b/mercurial/utils/storageutil.py Mon Sep 09 17:26:17 2019 -0400 @@ -22,8 +22,8 @@ error, mdiff, pycompat, - repository, ) +from ..interfaces import repository _nullhash = hashlib.sha1(nullid) @@ -304,9 +304,9 @@ ``rawsizefn`` (optional) Callable receiving a revision number and returning the length of the - ``store.revision(rev, raw=True)``. + ``store.rawdata(rev)``. - If not defined, ``len(store.revision(rev, raw=True))`` will be called. + If not defined, ``len(store.rawdata(rev))`` will be called. ``revdifffn`` (optional) Callable receiving a pair of revision numbers that returns a delta @@ -422,7 +422,7 @@ if revisiondata: if store.iscensored(baserev) or store.iscensored(rev): try: - revision = store.revision(node, raw=True) + revision = store.rawdata(node) except error.CensoredNodeError as e: revision = e.tombstone @@ -430,19 +430,18 @@ if rawsizefn: baserevisionsize = rawsizefn(baserev) else: - baserevisionsize = len(store.revision(baserev, - raw=True)) + baserevisionsize = len(store.rawdata(baserev)) elif (baserev == nullrev and deltamode != repository.CG_DELTAMODE_PREV): - revision = store.revision(node, raw=True) + revision = store.rawdata(node) available.add(rev) else: if revdifffn: delta = revdifffn(baserev, rev) else: - delta = mdiff.textdiff(store.revision(baserev, raw=True), - store.revision(rev, raw=True)) + delta = mdiff.textdiff(store.rawdata(baserev), + store.rawdata(rev)) available.add(rev)
--- a/mercurial/wireprotoserver.py Sat Sep 07 14:35:21 2019 +0100 +++ b/mercurial/wireprotoserver.py Mon Sep 09 17:26:17 2019 -0400 @@ -21,10 +21,12 @@ wireprotov1server, wireprotov2server, ) +from .interfaces import ( + util as interfaceutil, +) from .utils import ( cborutil, compression, - interfaceutil, ) stringio = util.stringio @@ -655,6 +657,8 @@ continue rsp = wireprotov1server.dispatch(repo, proto, request) + repo.ui.fout.flush() + repo.ui.ferr.flush() if isinstance(rsp, bytes): _sshv1respondbytes(fout, rsp)
--- a/mercurial/wireprototypes.py Sat Sep 07 14:35:21 2019 +0100 +++ b/mercurial/wireprototypes.py Mon Sep 09 17:26:17 2019 -0400 @@ -17,9 +17,11 @@ error, util, ) +from .interfaces import ( + util as interfaceutil, +) from .utils import ( compression, - interfaceutil, ) # Names of the SSH protocol implementations.
--- a/mercurial/wireprotov1peer.py Sat Sep 07 14:35:21 2019 +0100 +++ b/mercurial/wireprotov1peer.py Mon Sep 09 17:26:17 2019 -0400 @@ -22,12 +22,12 @@ error, pushkey as pushkeymod, pycompat, - repository, util, wireprototypes, ) -from .utils import ( - interfaceutil, +from .interfaces import ( + repository, + util as interfaceutil, ) urlreq = util.urlreq
--- a/mercurial/wireprotov2server.py Sat Sep 07 14:35:21 2019 +0100 +++ b/mercurial/wireprotov2server.py Mon Sep 09 17:26:17 2019 -0400 @@ -28,9 +28,11 @@ wireprotoframing, wireprototypes, ) +from .interfaces import ( + util as interfaceutil, +) from .utils import ( cborutil, - interfaceutil, stringutil, ) @@ -937,7 +939,7 @@ followingdata = [] if b'revision' in fields: - revisiondata = cl.revision(node, raw=True) + revisiondata = cl.rawdata(node) followingmeta.append((b'revision', len(revisiondata))) followingdata.append(revisiondata)
--- a/relnotes/next	Sat Sep 07 14:35:21 2019 +0100
+++ b/relnotes/next	Mon Sep 09 17:26:17 2019 -0400
@@ -9,6 +9,11 @@
 
 == Backwards Compatibility Changes ==
 
+ * A shell that supports `$(command)` syntax for command substitution is now
+   required for running the test suite. The test runner normally uses
+   `sh`, so if that is a shell that doesn't support `$(command)` syntax,
+   you can override it by setting `$HGTEST_SHELL` or by passing it to
+   `run-tests.py --shell <shell>`.
 
 == Internal API Changes ==
--- a/rust/hg-core/Cargo.toml Sat Sep 07 14:35:21 2019 +0100 +++ b/rust/hg-core/Cargo.toml Mon Sep 09 17:26:17 2019 -0400 @@ -8,12 +8,10 @@ [lib] name = "hg" -[dev-dependencies] -rand = "*" -rand_pcg = "*" - [dependencies] byteorder = "1.3.1" lazy_static = "1.3.0" memchr = "2.2.0" +rand = "> 0.6.4" +rand_pcg = "> 0.1.0" regex = "^1.1"
--- a/rust/hg-core/src/ancestors.rs Sat Sep 07 14:35:21 2019 +0100 +++ b/rust/hg-core/src/ancestors.rs Mon Sep 09 17:26:17 2019 -0400 @@ -784,5 +784,4 @@ missing_ancestors.remove_ancestors_from(&mut revs).unwrap(); assert!(!revs.contains(&problem_rev)); } - }
--- a/rust/hg-core/src/dagops.rs Sat Sep 07 14:35:21 2019 +0100 +++ b/rust/hg-core/src/dagops.rs Mon Sep 09 17:26:17 2019 -0400 @@ -8,10 +8,10 @@ //! Miscellaneous DAG operations //! //! # Terminology -//! - By *relative heads* of a collection of revision numbers (`Revision`), -//! we mean those revisions that have no children among the collection. -//! - Similarly *relative roots* of a collection of `Revision`, we mean -//! those whose parents, if any, don't belong to the collection. +//! - By *relative heads* of a collection of revision numbers (`Revision`), we +//! mean those revisions that have no children among the collection. +//! - Similarly *relative roots* of a collection of `Revision`, we mean those +//! whose parents, if any, don't belong to the collection. use super::{Graph, GraphError, Revision, NULL_REVISION}; use crate::ancestors::AncestorsIterator; use std::collections::{BTreeSet, HashSet}; @@ -272,5 +272,4 @@ ); Ok(()) } - }
--- a/rust/hg-core/src/dirstate.rs Sat Sep 07 14:35:21 2019 +0100 +++ b/rust/hg-core/src/dirstate.rs Mon Sep 09 17:26:17 2019 -0400 @@ -1,36 +1,76 @@ +// dirstate module +// +// Copyright 2019 Raphaël Gomès <rgomes@octobus.net> +// +// This software may be used and distributed according to the terms of the +// GNU General Public License version 2 or any later version. + +use crate::DirstateParseError; +use std::collections::hash_map; +use std::collections::HashMap; +use std::convert::TryFrom; + pub mod dirs_multiset; +pub mod dirstate_map; pub mod parsers; -#[derive(Debug, PartialEq, Copy, Clone)] -pub struct DirstateParents<'a> { - pub p1: &'a [u8], - pub p2: &'a [u8], +#[derive(Debug, PartialEq, Clone)] +pub struct DirstateParents { + pub p1: [u8; 20], + pub p2: [u8; 20], } /// The C implementation uses all signed types. This will be an issue /// either when 4GB+ source files are commonplace or in 2038, whichever /// comes first. -#[derive(Debug, PartialEq)] +#[derive(Debug, PartialEq, Copy, Clone)] pub struct DirstateEntry { - pub state: i8, + pub state: EntryState, pub mode: i32, pub mtime: i32, pub size: i32, } -pub type DirstateVec = Vec<(Vec<u8>, DirstateEntry)>; +pub type StateMap = HashMap<Vec<u8>, DirstateEntry>; +pub type StateMapIter<'a> = hash_map::Iter<'a, Vec<u8>, DirstateEntry>; +pub type CopyMap = HashMap<Vec<u8>, Vec<u8>>; +pub type CopyMapIter<'a> = hash_map::Iter<'a, Vec<u8>, Vec<u8>>; -#[derive(Debug, PartialEq)] -pub struct CopyVecEntry<'a> { - pub path: &'a [u8], - pub copy_path: &'a [u8], +#[derive(Copy, Clone, Debug, Eq, PartialEq)] +pub enum EntryState { + Normal, + Added, + Removed, + Merged, + Unknown, } -pub type CopyVec<'a> = Vec<CopyVecEntry<'a>>; +impl TryFrom<u8> for EntryState { + type Error = DirstateParseError; -/// The Python implementation passes either a mapping (dirstate) or a flat -/// iterable (manifest) -pub enum DirsIterable { - Dirstate(DirstateVec), - Manifest(Vec<Vec<u8>>), + fn try_from(value: u8) -> Result<Self, Self::Error> { + match value { + b'n' => Ok(EntryState::Normal), + b'a' => Ok(EntryState::Added), + b'r' => Ok(EntryState::Removed), + b'm' => Ok(EntryState::Merged), + b'?' => Ok(EntryState::Unknown), + _ => Err(DirstateParseError::CorruptedEntry(format!( + "Incorrect entry state {}", + value + ))), + } + } } + +impl Into<u8> for EntryState { + fn into(self) -> u8 { + match self { + EntryState::Normal => b'n', + EntryState::Added => b'a', + EntryState::Removed => b'r', + EntryState::Merged => b'm', + EntryState::Unknown => b'?', + } + } +}
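The `EntryState` enum introduced above replaces the raw `i8` state codes; its `TryFrom<u8>` and `Into<u8>` impls convert to and from the single-byte on-disk representation. A minimal round-trip sketch (assuming the crate is consumed under its library name `hg`, which re-exports `EntryState`):

    use hg::EntryState;
    use std::convert::TryFrom;

    fn main() {
        // The five valid on-disk state codes round-trip through the enum.
        for byte in [b'n', b'a', b'r', b'm', b'?'].iter() {
            let state = EntryState::try_from(*byte).unwrap();
            let back: u8 = state.into();
            assert_eq!(*byte, back);
        }
        // Any other byte is rejected as a corrupted entry.
        assert!(EntryState::try_from(b'x').is_err());
    }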
--- a/rust/hg-core/src/dirstate/dirs_multiset.rs	Sat Sep 07 14:35:21 2019 +0100
+++ b/rust/hg-core/src/dirstate/dirs_multiset.rs	Mon Sep 09 17:26:17 2019 -0400
@@ -8,47 +8,59 @@
 //! A multiset of directory names.
 //!
 //! Used to count the references to directories in a manifest or dirstate.
-use crate::{utils::files, DirsIterable, DirstateEntry, DirstateMapError};
-use std::collections::hash_map::{Entry, Iter};
+use crate::{
+    dirstate::EntryState, utils::files, DirstateEntry, DirstateMapError,
+};
+use std::collections::hash_map::{self, Entry};
 use std::collections::HashMap;
 
+// could be encapsulated if we cared about API stability more seriously
+pub type DirsMultisetIter<'a> = hash_map::Keys<'a, Vec<u8>, u32>;
+
 #[derive(PartialEq, Debug)]
 pub struct DirsMultiset {
     inner: HashMap<Vec<u8>, u32>,
 }
 
 impl DirsMultiset {
-    /// Initializes the multiset from a dirstate or a manifest.
+    /// Initializes the multiset from a dirstate.
     ///
     /// If `skip_state` is provided, skips dirstate entries with equal state.
-    pub fn new(iterable: DirsIterable, skip_state: Option<i8>) -> Self {
+    pub fn from_dirstate(
+        vec: &HashMap<Vec<u8>, DirstateEntry>,
+        skip_state: Option<EntryState>,
+    ) -> Self {
         let mut multiset = DirsMultiset {
             inner: HashMap::new(),
         };
 
-        match iterable {
-            DirsIterable::Dirstate(vec) => {
-                for (ref filename, DirstateEntry { state, .. }) in vec {
-                    // This `if` is optimized out of the loop
-                    if let Some(skip) = skip_state {
-                        if skip != state {
-                            multiset.add_path(filename);
-                        }
-                    } else {
-                        multiset.add_path(filename);
-                    }
-                }
-            }
-            DirsIterable::Manifest(vec) => {
-                for ref filename in vec {
+        for (filename, DirstateEntry { state, .. }) in vec {
+            // This `if` is optimized out of the loop
+            if let Some(skip) = skip_state {
+                if skip != *state {
                     multiset.add_path(filename);
                 }
+            } else {
+                multiset.add_path(filename);
             }
         }
 
         multiset
     }
 
+    /// Initializes the multiset from a manifest.
+    pub fn from_manifest(vec: &Vec<Vec<u8>>) -> Self {
+        let mut multiset = DirsMultiset {
+            inner: HashMap::new(),
+        };
+
+        for filename in vec {
+            multiset.add_path(filename);
+        }
+
+        multiset
+    }
+
     /// Increases the count of deepest directory contained in the path.
     ///
    /// If the directory is not yet in the map, adds its parents.
@@ -92,12 +104,12 @@ Ok(()) } - pub fn contains_key(&self, key: &[u8]) -> bool { + pub fn contains(&self, key: &[u8]) -> bool { self.inner.contains_key(key) } - pub fn iter(&self) -> Iter<Vec<u8>, u32> { - self.inner.iter() + pub fn iter(&self) -> DirsMultisetIter { + self.inner.keys() } pub fn len(&self) -> usize { @@ -108,10 +120,11 @@ #[cfg(test)] mod tests { use super::*; + use std::collections::HashMap; #[test] fn test_delete_path_path_not_found() { - let mut map = DirsMultiset::new(DirsIterable::Manifest(vec![]), None); + let mut map = DirsMultiset::from_manifest(&vec![]); let path = b"doesnotexist/"; assert_eq!( Err(DirstateMapError::PathNotFound(path.to_vec())), @@ -121,8 +134,7 @@ #[test] fn test_delete_path_empty_path() { - let mut map = - DirsMultiset::new(DirsIterable::Manifest(vec![vec![]]), None); + let mut map = DirsMultiset::from_manifest(&vec![vec![]]); let path = b""; assert_eq!(Ok(()), map.delete_path(path)); assert_eq!( @@ -162,7 +174,7 @@ #[test] fn test_add_path_empty_path() { - let mut map = DirsMultiset::new(DirsIterable::Manifest(vec![]), None); + let mut map = DirsMultiset::from_manifest(&vec![]); let path = b""; map.add_path(path); @@ -171,7 +183,7 @@ #[test] fn test_add_path_successful() { - let mut map = DirsMultiset::new(DirsIterable::Manifest(vec![]), None); + let mut map = DirsMultiset::from_manifest(&vec![]); map.add_path(b"a/"); assert_eq!(1, *map.inner.get(&b"a".to_vec()).unwrap()); @@ -216,15 +228,13 @@ #[test] fn test_dirsmultiset_new_empty() { - use DirsIterable::{Dirstate, Manifest}; - - let new = DirsMultiset::new(Manifest(vec![]), None); + let new = DirsMultiset::from_manifest(&vec![]); let expected = DirsMultiset { inner: HashMap::new(), }; assert_eq!(expected, new); - let new = DirsMultiset::new(Dirstate(vec![]), None); + let new = DirsMultiset::from_dirstate(&HashMap::new(), None); let expected = DirsMultiset { inner: HashMap::new(), }; @@ -233,8 +243,6 @@ #[test] fn test_dirsmultiset_new_no_skip() { - use DirsIterable::{Dirstate, Manifest}; - let input_vec = ["a/", "b/", "a/c", "a/d/"] .iter() .map(|e| e.as_bytes().to_vec()) @@ -244,7 +252,7 @@ .map(|(k, v)| (k.as_bytes().to_vec(), *v)) .collect(); - let new = DirsMultiset::new(Manifest(input_vec), None); + let new = DirsMultiset::from_manifest(&input_vec); let expected = DirsMultiset { inner: expected_inner, }; @@ -256,7 +264,7 @@ ( f.as_bytes().to_vec(), DirstateEntry { - state: 0, + state: EntryState::Normal, mode: 0, mtime: 0, size: 0, @@ -269,7 +277,7 @@ .map(|(k, v)| (k.as_bytes().to_vec(), *v)) .collect(); - let new = DirsMultiset::new(Dirstate(input_map), None); + let new = DirsMultiset::from_dirstate(&input_map, None); let expected = DirsMultiset { inner: expected_inner, }; @@ -278,39 +286,25 @@ #[test] fn test_dirsmultiset_new_skip() { - use DirsIterable::{Dirstate, Manifest}; - - let input_vec = ["a/", "b/", "a/c", "a/d/"] - .iter() - .map(|e| e.as_bytes().to_vec()) - .collect(); - let expected_inner = [("", 2), ("a", 3), ("b", 1), ("a/d", 1)] - .iter() - .map(|(k, v)| (k.as_bytes().to_vec(), *v)) - .collect(); - - let new = DirsMultiset::new(Manifest(input_vec), Some('n' as i8)); - let expected = DirsMultiset { - inner: expected_inner, - }; - // Skip does not affect a manifest - assert_eq!(expected, new); - - let input_map = - [("a/", 'n'), ("a/b/", 'n'), ("a/c", 'r'), ("a/d/", 'm')] - .iter() - .map(|(f, state)| { - ( - f.as_bytes().to_vec(), - DirstateEntry { - state: *state as i8, - mode: 0, - mtime: 0, - size: 0, - }, - ) - }) - .collect(); + let input_map = [ + ("a/", 
EntryState::Normal), + ("a/b/", EntryState::Normal), + ("a/c", EntryState::Removed), + ("a/d/", EntryState::Merged), + ] + .iter() + .map(|(f, state)| { + ( + f.as_bytes().to_vec(), + DirstateEntry { + state: *state, + mode: 0, + mtime: 0, + size: 0, + }, + ) + }) + .collect(); // "a" incremented with "a/c" and "a/d/" let expected_inner = [("", 1), ("a", 2), ("a/d", 1)] @@ -318,11 +312,11 @@ .map(|(k, v)| (k.as_bytes().to_vec(), *v)) .collect(); - let new = DirsMultiset::new(Dirstate(input_map), Some('n' as i8)); + let new = + DirsMultiset::from_dirstate(&input_map, Some(EntryState::Normal)); let expected = DirsMultiset { inner: expected_inner, }; assert_eq!(expected, new); } - }
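For callers, splitting `DirsMultiset::new` into `from_manifest` and `from_dirstate` makes the input shape explicit at the call site. A small usage sketch (file names are made up; relies only on the crate's public re-exports under the library name `hg`):

    use hg::{DirsMultiset, DirstateEntry, EntryState};
    use std::collections::HashMap;

    fn main() {
        // Manifest-style construction: a plain list of tracked file paths.
        let files = vec![b"a/b/c".to_vec(), b"a/d".to_vec()];
        let dirs = DirsMultiset::from_manifest(&files);
        assert!(dirs.contains(b"a"));
        assert!(dirs.contains(b"a/b"));

        // Dirstate-style construction, skipping entries in the Removed state.
        let mut state_map = HashMap::new();
        state_map.insert(
            b"a/b/c".to_vec(),
            DirstateEntry {
                state: EntryState::Removed,
                mode: 0,
                mtime: 0,
                size: 0,
            },
        );
        let tracked =
            DirsMultiset::from_dirstate(&state_map, Some(EntryState::Removed));
        // The only entry was skipped, so no directory was counted.
        assert_eq!(0, tracked.len());
    }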
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/rust/hg-core/src/dirstate/dirstate_map.rs Mon Sep 09 17:26:17 2019 -0400 @@ -0,0 +1,423 @@ +// dirstate_map.rs +// +// Copyright 2019 Raphaël Gomès <rgomes@octobus.net> +// +// This software may be used and distributed according to the terms of the +// GNU General Public License version 2 or any later version. + +use crate::{ + dirstate::{parsers::PARENT_SIZE, EntryState}, + pack_dirstate, parse_dirstate, + utils::files::normalize_case, + CopyMap, DirsMultiset, DirstateEntry, DirstateError, DirstateMapError, + DirstateParents, DirstateParseError, StateMap, +}; +use core::borrow::Borrow; +use std::collections::{HashMap, HashSet}; +use std::convert::TryInto; +use std::iter::FromIterator; +use std::ops::Deref; +use std::time::Duration; + +pub type FileFoldMap = HashMap<Vec<u8>, Vec<u8>>; + +const NULL_ID: [u8; 20] = [0; 20]; +const MTIME_UNSET: i32 = -1; +const SIZE_DIRTY: i32 = -2; + +#[derive(Default)] +pub struct DirstateMap { + state_map: StateMap, + pub copy_map: CopyMap, + file_fold_map: Option<FileFoldMap>, + pub dirs: Option<DirsMultiset>, + pub all_dirs: Option<DirsMultiset>, + non_normal_set: HashSet<Vec<u8>>, + other_parent_set: HashSet<Vec<u8>>, + parents: Option<DirstateParents>, + dirty_parents: bool, +} + +/// Should only really be used in python interface code, for clarity +impl Deref for DirstateMap { + type Target = StateMap; + + fn deref(&self) -> &Self::Target { + &self.state_map + } +} + +impl FromIterator<(Vec<u8>, DirstateEntry)> for DirstateMap { + fn from_iter<I: IntoIterator<Item = (Vec<u8>, DirstateEntry)>>( + iter: I, + ) -> Self { + Self { + state_map: iter.into_iter().collect(), + ..Self::default() + } + } +} + +impl DirstateMap { + pub fn new() -> Self { + Self::default() + } + + pub fn clear(&mut self) { + self.state_map.clear(); + self.copy_map.clear(); + self.file_fold_map = None; + self.non_normal_set.clear(); + self.other_parent_set.clear(); + self.set_parents(&DirstateParents { + p1: NULL_ID, + p2: NULL_ID, + }) + } + + /// Add a tracked file to the dirstate + pub fn add_file( + &mut self, + filename: &[u8], + old_state: EntryState, + entry: DirstateEntry, + ) { + if old_state == EntryState::Unknown || old_state == EntryState::Removed + { + if let Some(ref mut dirs) = self.dirs { + dirs.add_path(filename) + } + } + if old_state == EntryState::Unknown { + if let Some(ref mut all_dirs) = self.all_dirs { + all_dirs.add_path(filename) + } + } + self.state_map.insert(filename.to_owned(), entry.to_owned()); + + if entry.state != EntryState::Normal || entry.mtime == MTIME_UNSET { + self.non_normal_set.insert(filename.to_owned()); + } + + if entry.size == SIZE_DIRTY { + self.other_parent_set.insert(filename.to_owned()); + } + } + + /// Mark a file as removed in the dirstate. + /// + /// The `size` parameter is used to store sentinel values that indicate + /// the file's previous state. In the future, we should refactor this + /// to be more explicit about what that state is. 
+ pub fn remove_file( + &mut self, + filename: &[u8], + old_state: EntryState, + size: i32, + ) -> Result<(), DirstateMapError> { + if old_state != EntryState::Unknown && old_state != EntryState::Removed + { + if let Some(ref mut dirs) = self.dirs { + dirs.delete_path(filename)?; + } + } + if old_state == EntryState::Unknown { + if let Some(ref mut all_dirs) = self.all_dirs { + all_dirs.add_path(filename); + } + } + + if let Some(ref mut file_fold_map) = self.file_fold_map { + file_fold_map.remove(&normalize_case(filename)); + } + self.state_map.insert( + filename.to_owned(), + DirstateEntry { + state: EntryState::Removed, + mode: 0, + size, + mtime: 0, + }, + ); + self.non_normal_set.insert(filename.to_owned()); + Ok(()) + } + + /// Remove a file from the dirstate. + /// Returns `true` if the file was previously recorded. + pub fn drop_file( + &mut self, + filename: &[u8], + old_state: EntryState, + ) -> Result<bool, DirstateMapError> { + let exists = self.state_map.remove(filename).is_some(); + + if exists { + if old_state != EntryState::Removed { + if let Some(ref mut dirs) = self.dirs { + dirs.delete_path(filename)?; + } + } + if let Some(ref mut all_dirs) = self.all_dirs { + all_dirs.delete_path(filename)?; + } + } + if let Some(ref mut file_fold_map) = self.file_fold_map { + file_fold_map.remove(&normalize_case(filename)); + } + self.non_normal_set.remove(filename); + + Ok(exists) + } + + pub fn clear_ambiguous_times( + &mut self, + filenames: Vec<Vec<u8>>, + now: i32, + ) { + for filename in filenames { + let mut changed = false; + self.state_map + .entry(filename.to_owned()) + .and_modify(|entry| { + if entry.state == EntryState::Normal && entry.mtime == now + { + changed = true; + *entry = DirstateEntry { + mtime: MTIME_UNSET, + ..*entry + }; + } + }); + if changed { + self.non_normal_set.insert(filename.to_owned()); + } + } + } + + pub fn non_normal_other_parent_entries( + &self, + ) -> (HashSet<Vec<u8>>, HashSet<Vec<u8>>) { + let mut non_normal = HashSet::new(); + let mut other_parent = HashSet::new(); + + for ( + filename, + DirstateEntry { + state, size, mtime, .. + }, + ) in self.state_map.iter() + { + if *state != EntryState::Normal || *mtime == MTIME_UNSET { + non_normal.insert(filename.to_owned()); + } + if *state == EntryState::Normal && *size == SIZE_DIRTY { + other_parent.insert(filename.to_owned()); + } + } + + (non_normal, other_parent) + } + + /// Both of these setters and their uses appear to be the simplest way to + /// emulate a Python lazy property, but it is ugly and unidiomatic. + /// TODO One day, rewriting this struct using the typestate might be a + /// good idea. 
+ pub fn set_all_dirs(&mut self) { + if self.all_dirs.is_none() { + self.all_dirs = + Some(DirsMultiset::from_dirstate(&self.state_map, None)); + } + } + + pub fn set_dirs(&mut self) { + if self.dirs.is_none() { + self.dirs = Some(DirsMultiset::from_dirstate( + &self.state_map, + Some(EntryState::Removed), + )); + } + } + + pub fn has_tracked_dir(&mut self, directory: &[u8]) -> bool { + self.set_dirs(); + self.dirs.as_ref().unwrap().contains(directory) + } + + pub fn has_dir(&mut self, directory: &[u8]) -> bool { + self.set_all_dirs(); + self.all_dirs.as_ref().unwrap().contains(directory) + } + + pub fn parents( + &mut self, + file_contents: &[u8], + ) -> Result<&DirstateParents, DirstateError> { + if let Some(ref parents) = self.parents { + return Ok(parents); + } + let parents; + if file_contents.len() == PARENT_SIZE * 2 { + parents = DirstateParents { + p1: file_contents[..PARENT_SIZE].try_into().unwrap(), + p2: file_contents[PARENT_SIZE..PARENT_SIZE * 2] + .try_into() + .unwrap(), + }; + } else if file_contents.is_empty() { + parents = DirstateParents { + p1: NULL_ID, + p2: NULL_ID, + }; + } else { + return Err(DirstateError::Parse(DirstateParseError::Damaged)); + } + + self.parents = Some(parents); + Ok(self.parents.as_ref().unwrap()) + } + + pub fn set_parents(&mut self, parents: &DirstateParents) { + self.parents = Some(parents.clone()); + self.dirty_parents = true; + } + + pub fn read( + &mut self, + file_contents: &[u8], + ) -> Result<Option<DirstateParents>, DirstateError> { + if file_contents.is_empty() { + return Ok(None); + } + + let parents = parse_dirstate( + &mut self.state_map, + &mut self.copy_map, + file_contents, + )?; + + if !self.dirty_parents { + self.set_parents(&parents); + } + + Ok(Some(parents)) + } + + pub fn pack( + &mut self, + parents: DirstateParents, + now: Duration, + ) -> Result<Vec<u8>, DirstateError> { + let packed = + pack_dirstate(&mut self.state_map, &self.copy_map, parents, now)?; + + self.dirty_parents = false; + + let result = self.non_normal_other_parent_entries(); + self.non_normal_set = result.0; + self.other_parent_set = result.1; + Ok(packed) + } + + pub fn build_file_fold_map(&mut self) -> &FileFoldMap { + if let Some(ref file_fold_map) = self.file_fold_map { + return file_fold_map; + } + let mut new_file_fold_map = FileFoldMap::new(); + for (filename, DirstateEntry { state, .. 
}) in self.state_map.borrow() + { + if *state == EntryState::Removed { + new_file_fold_map + .insert(normalize_case(filename), filename.to_owned()); + } + } + self.file_fold_map = Some(new_file_fold_map); + self.file_fold_map.as_ref().unwrap() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_dirs_multiset() { + let mut map = DirstateMap::new(); + assert!(map.dirs.is_none()); + assert!(map.all_dirs.is_none()); + + assert_eq!(false, map.has_dir(b"nope")); + assert!(map.all_dirs.is_some()); + assert!(map.dirs.is_none()); + + assert_eq!(false, map.has_tracked_dir(b"nope")); + assert!(map.dirs.is_some()); + } + + #[test] + fn test_add_file() { + let mut map = DirstateMap::new(); + + assert_eq!(0, map.len()); + + map.add_file( + b"meh", + EntryState::Normal, + DirstateEntry { + state: EntryState::Normal, + mode: 1337, + mtime: 1337, + size: 1337, + }, + ); + + assert_eq!(1, map.len()); + assert_eq!(0, map.non_normal_set.len()); + assert_eq!(0, map.other_parent_set.len()); + } + + #[test] + fn test_non_normal_other_parent_entries() { + let map: DirstateMap = [ + (b"f1", (EntryState::Removed, 1337, 1337, 1337)), + (b"f2", (EntryState::Normal, 1337, 1337, -1)), + (b"f3", (EntryState::Normal, 1337, 1337, 1337)), + (b"f4", (EntryState::Normal, 1337, -2, 1337)), + (b"f5", (EntryState::Added, 1337, 1337, 1337)), + (b"f6", (EntryState::Added, 1337, 1337, -1)), + (b"f7", (EntryState::Merged, 1337, 1337, -1)), + (b"f8", (EntryState::Merged, 1337, 1337, 1337)), + (b"f9", (EntryState::Merged, 1337, -2, 1337)), + (b"fa", (EntryState::Added, 1337, -2, 1337)), + (b"fb", (EntryState::Removed, 1337, -2, 1337)), + ] + .iter() + .map(|(fname, (state, mode, size, mtime))| { + ( + fname.to_vec(), + DirstateEntry { + state: *state, + mode: *mode, + size: *size, + mtime: *mtime, + }, + ) + }) + .collect(); + + let non_normal = [ + b"f1", b"f2", b"f5", b"f6", b"f7", b"f8", b"f9", b"fa", b"fb", + ] + .iter() + .map(|x| x.to_vec()) + .collect(); + + let mut other_parent = HashSet::new(); + other_parent.insert(b"f4".to_vec()); + + assert_eq!( + (non_normal, other_parent), + map.non_normal_other_parent_entries() + ); + } +}
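A short sketch of how the new `DirstateMap` is meant to be driven (the file name is hypothetical; `-1` is the `MTIME_UNSET` sentinel defined above, and the crate is assumed to be used under its library name `hg`):

    use hg::{DirstateEntry, DirstateMap, EntryState};

    fn main() {
        let mut map = DirstateMap::new();

        // Track a file; an mtime of -1 (MTIME_UNSET) lands the entry in the
        // non-normal set even though its state is Normal.
        map.add_file(
            b"src/lib.rs",
            EntryState::Unknown, // previous state: untracked
            DirstateEntry {
                state: EntryState::Normal,
                mode: 0o644,
                mtime: -1,
                size: 42,
            },
        );

        assert_eq!(1, map.len());
        let (non_normal, other_parent) = map.non_normal_other_parent_entries();
        assert!(non_normal.contains(&b"src/lib.rs".to_vec()));
        assert!(other_parent.is_empty());
    }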
--- a/rust/hg-core/src/dirstate/parsers.rs Sat Sep 07 14:35:21 2019 +0100 +++ b/rust/hg-core/src/dirstate/parsers.rs Mon Sep 09 17:26:17 2019 -0400 @@ -4,31 +4,34 @@ // GNU General Public License version 2 or any later version. use crate::{ - CopyVec, CopyVecEntry, DirstateEntry, DirstatePackError, DirstateParents, - DirstateParseError, DirstateVec, + dirstate::{CopyMap, EntryState, StateMap}, + DirstateEntry, DirstatePackError, DirstateParents, DirstateParseError, }; use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt}; -use std::collections::HashMap; +use std::convert::{TryFrom, TryInto}; use std::io::Cursor; +use std::time::Duration; /// Parents are stored in the dirstate as byte hashes. -const PARENT_SIZE: usize = 20; +pub const PARENT_SIZE: usize = 20; /// Dirstate entries have a static part of 8 + 32 + 32 + 32 + 32 bits. const MIN_ENTRY_SIZE: usize = 17; +// TODO parse/pack: is mutate-on-loop better for performance? + pub fn parse_dirstate( + state_map: &mut StateMap, + copy_map: &mut CopyMap, contents: &[u8], -) -> Result<(DirstateParents, DirstateVec, CopyVec), DirstateParseError> { +) -> Result<DirstateParents, DirstateParseError> { if contents.len() < PARENT_SIZE * 2 { return Err(DirstateParseError::TooLittleData); } - let mut dirstate_vec = vec![]; - let mut copies = vec![]; let mut curr_pos = PARENT_SIZE * 2; let parents = DirstateParents { - p1: &contents[..PARENT_SIZE], - p2: &contents[PARENT_SIZE..curr_pos], + p1: contents[..PARENT_SIZE].try_into().unwrap(), + p2: contents[PARENT_SIZE..curr_pos].try_into().unwrap(), }; while curr_pos < contents.len() { @@ -38,7 +41,7 @@ let entry_bytes = &contents[curr_pos..]; let mut cursor = Cursor::new(entry_bytes); - let state = cursor.read_i8()?; + let state = EntryState::try_from(cursor.read_u8()?)?; let mode = cursor.read_i32::<BigEndian>()?; let size = cursor.read_i32::<BigEndian>()?; let mtime = cursor.read_i32::<BigEndian>()?; @@ -57,9 +60,9 @@ }; if let Some(copy_path) = copy { - copies.push(CopyVecEntry { path, copy_path }); + copy_map.insert(path.to_owned(), copy_path.to_owned()); }; - dirstate_vec.push(( + state_map.insert( path.to_owned(), DirstateEntry { state, @@ -67,28 +70,28 @@ size, mtime, }, - )); + ); curr_pos = curr_pos + MIN_ENTRY_SIZE + (path_len); } - Ok((parents, dirstate_vec, copies)) + Ok(parents) } +/// `now` is the duration in seconds since the Unix epoch pub fn pack_dirstate( - dirstate_vec: &DirstateVec, - copymap: &HashMap<Vec<u8>, Vec<u8>>, + state_map: &mut StateMap, + copy_map: &CopyMap, parents: DirstateParents, - now: i32, -) -> Result<(Vec<u8>, DirstateVec), DirstatePackError> { - if parents.p1.len() != PARENT_SIZE || parents.p2.len() != PARENT_SIZE { - return Err(DirstatePackError::CorruptedParent); - } + now: Duration, +) -> Result<Vec<u8>, DirstatePackError> { + // TODO move away from i32 before 2038. 
+    let now: i32 = now.as_secs().try_into().expect("time overflow");
 
-    let expected_size: usize = dirstate_vec
+    let expected_size: usize = state_map
         .iter()
-        .map(|(ref filename, _)| {
+        .map(|(filename, _)| {
             let mut length = MIN_ENTRY_SIZE + filename.len();
-            if let Some(ref copy) = copymap.get(filename) {
+            if let Some(copy) = copy_map.get(filename) {
                 length += copy.len() + 1;
             }
             length
@@ -97,15 +100,15 @@
     let expected_size = expected_size + PARENT_SIZE * 2;
 
     let mut packed = Vec::with_capacity(expected_size);
-    let mut new_dirstate_vec = vec![];
+    let mut new_state_map = vec![];
 
-    packed.extend(parents.p1);
-    packed.extend(parents.p2);
+    packed.extend(&parents.p1);
+    packed.extend(&parents.p2);
 
-    for (ref filename, entry) in dirstate_vec {
+    for (filename, entry) in state_map.iter() {
         let mut new_filename: Vec<u8> = filename.to_owned();
         let mut new_mtime: i32 = entry.mtime;
-        if entry.state == 'n' as i8 && entry.mtime == now.into() {
+        if entry.state == EntryState::Normal && entry.mtime == now {
             // The file was last modified "simultaneously" with the current
             // write to dirstate (i.e. within the same second for file-
             // systems with a granularity of 1 sec). This commonly happens
             // contents of the file if the size is the same. This prevents
             // mistakenly treating such files as clean.
             new_mtime = -1;
-            new_dirstate_vec.push((
+            new_state_map.push((
                 filename.to_owned(),
                 DirstateEntry {
                     mtime: new_mtime,
                     ..*entry
                 },
             ));
         }
 
-        if let Some(copy) = copymap.get(filename) {
+        if let Some(copy) = copy_map.get(filename) {
             new_filename.push('\0' as u8);
             new_filename.extend(copy);
         }
 
-        packed.write_i8(entry.state)?;
+        packed.write_u8(entry.state.into())?;
         packed.write_i32::<BigEndian>(entry.mode)?;
         packed.write_i32::<BigEndian>(entry.size)?;
         packed.write_i32::<BigEndian>(new_mtime)?;
@@ -142,143 +145,155 @@
         return Err(DirstatePackError::BadSize(expected_size, packed.len()));
     }
 
-    Ok((packed, new_dirstate_vec))
+    state_map.extend(new_state_map);
+
+    Ok(packed)
 }
 
 #[cfg(test)]
 mod tests {
     use super::*;
+    use std::collections::HashMap;
 
     #[test]
     fn test_pack_dirstate_empty() {
-        let dirstate_vec: DirstateVec = vec![];
+        let mut state_map: StateMap = HashMap::new();
         let copymap = HashMap::new();
         let parents = DirstateParents {
-            p1: b"12345678910111213141",
-            p2: b"00000000000000000000",
+            p1: *b"12345678910111213141",
+            p2: *b"00000000000000000000",
         };
-        let now: i32 = 15000000;
-        let expected =
-            (b"1234567891011121314100000000000000000000".to_vec(), vec![]);
+        let now = Duration::new(15000000, 0);
+        let expected = b"1234567891011121314100000000000000000000".to_vec();
 
         assert_eq!(
             expected,
-            pack_dirstate(&dirstate_vec, &copymap, parents, now).unwrap()
+            pack_dirstate(&mut state_map, &copymap, parents, now).unwrap()
         );
+
+        assert!(state_map.is_empty())
     }
     #[test]
     fn test_pack_dirstate_one_entry() {
-        let dirstate_vec: DirstateVec = vec![(
-            vec!['f' as u8, '1' as u8],
-            DirstateEntry {
-                state: 'n' as i8,
-                mode: 0o644,
-                size: 0,
-                mtime: 791231220,
-            },
-        )];
-        let copymap = HashMap::new();
-        let parents = DirstateParents {
-            p1: b"12345678910111213141",
-            p2: b"00000000000000000000",
-        };
-        let now: i32 = 15000000;
-        let expected = (
-            [
-                49, 50, 51, 52, 53, 54, 55, 56, 57, 49, 48, 49, 49, 49, 50,
-                49, 51, 49, 52, 49, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
-                48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 110, 0, 0, 1, 164, 0,
-                0, 0, 0, 47, 41, 58, 244, 0, 0, 0, 2, 102, 49,
-            ]
-            .to_vec(),
-            vec![],
-        );
-
-        assert_eq!(
-            expected,
-            pack_dirstate(&dirstate_vec, &copymap, parents, now).unwrap()
-        );
-    }
-    #[test]
-    fn test_pack_dirstate_one_entry_with_copy() {
-        let dirstate_vec: DirstateVec = vec![(
+        let expected_state_map: StateMap = [(
             b"f1".to_vec(),
             DirstateEntry {
-                state: 'n' as i8,
+                state: EntryState::Normal,
                 mode: 0o644,
                 size: 0,
                 mtime: 791231220,
             },
-        )];
+        )]
+        .iter()
+        .cloned()
+        .collect();
+        let mut state_map = expected_state_map.clone();
+
+        let copymap = HashMap::new();
+        let parents = DirstateParents {
+            p1: *b"12345678910111213141",
+            p2: *b"00000000000000000000",
+        };
+        let now = Duration::new(15000000, 0);
+        let expected = [
+            49, 50, 51, 52, 53, 54, 55, 56, 57, 49, 48, 49, 49, 49, 50, 49,
+            51, 49, 52, 49, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
+            48, 48, 48, 48, 48, 48, 48, 48, 110, 0, 0, 1, 164, 0, 0, 0, 0, 47,
+            41, 58, 244, 0, 0, 0, 2, 102, 49,
+        ]
+        .to_vec();
+
+        assert_eq!(
+            expected,
+            pack_dirstate(&mut state_map, &copymap, parents, now).unwrap()
+        );
+
+        assert_eq!(expected_state_map, state_map);
+    }
+    #[test]
+    fn test_pack_dirstate_one_entry_with_copy() {
+        let expected_state_map: StateMap = [(
+            b"f1".to_vec(),
+            DirstateEntry {
+                state: EntryState::Normal,
+                mode: 0o644,
+                size: 0,
+                mtime: 791231220,
+            },
+        )]
+        .iter()
+        .cloned()
+        .collect();
+        let mut state_map = expected_state_map.clone();
         let mut copymap = HashMap::new();
         copymap.insert(b"f1".to_vec(), b"copyname".to_vec());
         let parents = DirstateParents {
-            p1: b"12345678910111213141",
-            p2: b"00000000000000000000",
+            p1: *b"12345678910111213141",
+            p2: *b"00000000000000000000",
         };
-        let now: i32 = 15000000;
-        let expected = (
-            [
-                49, 50, 51, 52, 53, 54, 55, 56, 57, 49, 48, 49, 49, 49, 50,
-                49, 51, 49, 52, 49, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
-                48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 110, 0, 0, 1, 164, 0,
-                0, 0, 0, 47, 41, 58, 244, 0, 0, 0, 11, 102, 49, 0, 99, 111,
-                112, 121, 110, 97, 109, 101,
-            ]
-            .to_vec(),
-            vec![],
-        );
+        let now = Duration::new(15000000, 0);
+        let expected = [
+            49, 50, 51, 52, 53, 54, 55, 56, 57, 49, 48, 49, 49, 49, 50, 49,
+            51, 49, 52, 49, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
+            48, 48, 48, 48, 48, 48, 48, 48, 110, 0, 0, 1, 164, 0, 0, 0, 0, 47,
+            41, 58, 244, 0, 0, 0, 11, 102, 49, 0, 99, 111, 112, 121, 110, 97,
+            109, 101,
+        ]
+        .to_vec();
 
         assert_eq!(
             expected,
-            pack_dirstate(&dirstate_vec, &copymap, parents, now).unwrap()
+            pack_dirstate(&mut state_map, &copymap, parents, now).unwrap()
         );
+        assert_eq!(expected_state_map, state_map);
     }
     #[test]
     fn test_parse_pack_one_entry_with_copy() {
-        let dirstate_vec: DirstateVec = vec![(
+        let mut state_map: StateMap = [(
             b"f1".to_vec(),
             DirstateEntry {
-                state: 'n' as i8,
+                state: EntryState::Normal,
                 mode: 0o644,
                 size: 0,
                 mtime: 791231220,
             },
-        )];
+        )]
+        .iter()
+        .cloned()
+        .collect();
         let mut copymap = HashMap::new();
         copymap.insert(b"f1".to_vec(), b"copyname".to_vec());
         let parents = DirstateParents {
-            p1: b"12345678910111213141",
-            p2: b"00000000000000000000",
+            p1: *b"12345678910111213141",
+            p2: *b"00000000000000000000",
        };
-        let now: i32 = 15000000;
+        let now = Duration::new(15000000, 0);
         let result =
-            pack_dirstate(&dirstate_vec, &copymap, parents, now).unwrap();
+            pack_dirstate(&mut state_map, &copymap, parents.clone(), now)
+                .unwrap();
+        let mut new_state_map: StateMap = HashMap::new();
+        let mut new_copy_map: CopyMap = HashMap::new();
+        let new_parents = parse_dirstate(
+            &mut new_state_map,
+            &mut new_copy_map,
+            result.as_slice(),
+        )
+        .unwrap();
 
         assert_eq!(
-            (
-                parents,
-                dirstate_vec,
-                copymap
-                    .iter()
-                    .map(|(k, v)| CopyVecEntry {
-                        path: k.as_slice(),
-                        copy_path: v.as_slice()
-                    })
-                    .collect()
-            ),
-            parse_dirstate(result.0.as_slice()).unwrap()
+            (parents, state_map, copymap),
+            (new_parents, new_state_map, new_copy_map)
        )
    }
    #[test]
    fn test_parse_pack_multiple_entries_with_copy() {
-        let dirstate_vec: DirstateVec = vec![
+        let mut state_map: StateMap = [
             (
                 b"f1".to_vec(),
                 DirstateEntry {
-                    state: 'n' as i8,
+                    state: EntryState::Normal,
                     mode: 0o644,
                     size: 0,
                     mtime: 791231220,
@@ -287,7 +302,7 @@
             (
                 b"f2".to_vec(),
                 DirstateEntry {
-                    state: 'm' as i8,
+                    state: EntryState::Merged,
                     mode: 0o777,
                     size: 1000,
                     mtime: 791231220,
@@ -296,7 +311,7 @@
             (
                 b"f3".to_vec(),
                 DirstateEntry {
-                    state: 'r' as i8,
+                    state: EntryState::Removed,
                     mode: 0o644,
                     size: 234553,
                     mtime: 791231220,
@@ -305,84 +320,95 @@
             (
                 b"f4\xF6".to_vec(),
                 DirstateEntry {
-                    state: 'a' as i8,
+                    state: EntryState::Added,
                     mode: 0o644,
                     size: -1,
                     mtime: -1,
                 },
             ),
-        ];
+        ]
+        .iter()
+        .cloned()
+        .collect();
         let mut copymap = HashMap::new();
         copymap.insert(b"f1".to_vec(), b"copyname".to_vec());
         copymap.insert(b"f4\xF6".to_vec(), b"copyname2".to_vec());
         let parents = DirstateParents {
-            p1: b"12345678910111213141",
-            p2: b"00000000000000000000",
+            p1: *b"12345678910111213141",
+            p2: *b"00000000000000000000",
        };
-        let now: i32 = 15000000;
+        let now = Duration::new(15000000, 0);
         let result =
-            pack_dirstate(&dirstate_vec, &copymap, parents, now).unwrap();
+            pack_dirstate(&mut state_map, &copymap, parents.clone(), now)
+                .unwrap();
+        let mut new_state_map: StateMap = HashMap::new();
+        let mut new_copy_map: CopyMap = HashMap::new();
+        let new_parents = parse_dirstate(
+            &mut new_state_map,
+            &mut new_copy_map,
+            result.as_slice(),
+        )
+        .unwrap();
 
         assert_eq!(
-            (parents, dirstate_vec, copymap),
-            parse_dirstate(result.0.as_slice())
-                .and_then(|(p, dvec, cvec)| Ok((
-                    p,
-                    dvec,
-                    cvec.iter()
-                        .map(|entry| (
-                            entry.path.to_vec(),
-                            entry.copy_path.to_vec()
-                        ))
-                        .collect()
-                )))
-                .unwrap()
+            (parents, state_map, copymap),
+            (new_parents, new_state_map, new_copy_map)
        )
    }

    #[test]
    /// https://www.mercurial-scm.org/repo/hg/rev/af3f26b6bba4
    fn test_parse_pack_one_entry_with_copy_and_time_conflict() {
-        let dirstate_vec: DirstateVec = vec![(
+        let mut state_map: StateMap = [(
             b"f1".to_vec(),
             DirstateEntry {
-                state: 'n' as i8,
+                state: EntryState::Normal,
                 mode: 0o644,
                 size: 0,
                 mtime: 15000000,
             },
-        )];
+        )]
+        .iter()
+        .cloned()
+        .collect();
         let mut copymap = HashMap::new();
         copymap.insert(b"f1".to_vec(), b"copyname".to_vec());
         let parents = DirstateParents {
-            p1: b"12345678910111213141",
-            p2: b"00000000000000000000",
+            p1: *b"12345678910111213141",
+            p2: *b"00000000000000000000",
        };
-        let now: i32 = 15000000;
+        let now = Duration::new(15000000, 0);
         let result =
-            pack_dirstate(&dirstate_vec, &copymap, parents, now).unwrap();
+            pack_dirstate(&mut state_map, &copymap, parents.clone(), now)
+                .unwrap();
+
+        let mut new_state_map: StateMap = HashMap::new();
+        let mut new_copy_map: CopyMap = HashMap::new();
+        let new_parents = parse_dirstate(
+            &mut new_state_map,
+            &mut new_copy_map,
+            result.as_slice(),
+        )
+        .unwrap();

        assert_eq!(
            (
                parents,
-                vec![(
+                [(
                     b"f1".to_vec(),
                     DirstateEntry {
-                        state: 'n' as i8,
+                        state: EntryState::Normal,
                         mode: 0o644,
                         size: 0,
                         mtime: -1
                     }
-                )],
-                copymap
-                    .iter()
-                    .map(|(k, v)| CopyVecEntry {
-                        path: k.as_slice(),
-                        copy_path: v.as_slice()
-                    })
-                    .collect()
+                )]
+                .iter()
+                .cloned()
+                .collect::<StateMap>(),
+                copymap,
            ),
-            parse_dirstate(result.0.as_slice()).unwrap()
+            (new_parents, new_state_map, new_copy_map)
        )
    }
 }
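The mutate-in-place signatures compose into a simple pack/parse round trip, much like the tests above. A minimal sketch (hash bytes arbitrary; assumes the crate's `pack_dirstate`/`parse_dirstate` re-exports under the library name `hg`):

    use hg::{
        pack_dirstate, parse_dirstate, CopyMap, DirstateEntry, DirstateParents,
        EntryState, StateMap,
    };
    use std::collections::HashMap;
    use std::time::Duration;

    fn main() {
        let mut state_map: StateMap = HashMap::new();
        state_map.insert(
            b"f1".to_vec(),
            DirstateEntry {
                state: EntryState::Normal,
                mode: 0o644,
                size: 0,
                mtime: 791231220,
            },
        );
        let copy_map: CopyMap = HashMap::new();
        let parents = DirstateParents {
            p1: [1; 20], // arbitrary hashes for the sketch
            p2: [0; 20],
        };

        // Pack, then parse back into fresh maps; the round trip is lossless
        // as long as no entry's mtime collides with `now`.
        let packed = pack_dirstate(
            &mut state_map,
            &copy_map,
            parents.clone(),
            Duration::new(15_000_000, 0),
        )
        .unwrap();

        let mut new_state: StateMap = HashMap::new();
        let mut new_copies: CopyMap = HashMap::new();
        let new_parents =
            parse_dirstate(&mut new_state, &mut new_copies, &packed).unwrap();

        assert_eq!(parents, new_parents);
        assert_eq!(state_map, new_state);
        assert!(new_copies.is_empty());
    }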
--- a/rust/hg-core/src/discovery.rs	Sat Sep 07 14:35:21 2019 +0100
+++ b/rust/hg-core/src/discovery.rs	Mon Sep 09 17:26:17 2019 -0400
@@ -10,23 +10,124 @@
 //! This is a Rust counterpart to the `partialdiscovery` class of
 //! `mercurial.setdiscovery`
 
-use super::{Graph, GraphError, Revision};
+use super::{Graph, GraphError, Revision, NULL_REVISION};
 use crate::ancestors::MissingAncestors;
 use crate::dagops;
-use std::collections::HashSet;
+use rand::seq::SliceRandom;
+use rand::{thread_rng, RngCore, SeedableRng};
+use std::cmp::{max, min};
+use std::collections::{HashMap, HashSet, VecDeque};
+
+type Rng = rand_pcg::Pcg32;
 
 pub struct PartialDiscovery<G: Graph + Clone> {
     target_heads: Option<Vec<Revision>>,
     graph: G, // plays the role of self._repo
     common: MissingAncestors<G>,
     undecided: Option<HashSet<Revision>>,
+    children_cache: Option<HashMap<Revision, Vec<Revision>>>,
     missing: HashSet<Revision>,
+    rng: Rng,
+    respect_size: bool,
+    randomize: bool,
 }
 
 pub struct DiscoveryStats {
     pub undecided: Option<usize>,
 }
 
+/// Update an existing sample to match the expected size
+///
+/// The sample is updated with revisions exponentially distant from each
+/// element of `heads`.
+///
+/// If a target size is specified, the sampling will stop once this size is
+/// reached. Otherwise sampling will happen until roots of the <revs> set are
+/// reached.
+///
+/// - `revs`: set of revs we want to discover (if None, assume the whole dag
+///   represented by `parentsfn`)
+/// - `heads`: set of DAG head revs
+/// - `sample`: a sample to update
+/// - `parentsfn`: a callable to resolve parents for a revision
+/// - `quicksamplesize`: optional target size of the sample
+fn update_sample<I>(
+    revs: Option<&HashSet<Revision>>,
+    heads: impl IntoIterator<Item = Revision>,
+    sample: &mut HashSet<Revision>,
+    parentsfn: impl Fn(Revision) -> Result<I, GraphError>,
+    quicksamplesize: Option<usize>,
+) -> Result<(), GraphError>
+where
+    I: Iterator<Item = Revision>,
+{
+    let mut distances: HashMap<Revision, u32> = HashMap::new();
+    let mut visit: VecDeque<Revision> = heads.into_iter().collect();
+    let mut factor: u32 = 1;
+    let mut seen: HashSet<Revision> = HashSet::new();
+    while let Some(current) = visit.pop_front() {
+        if !seen.insert(current) {
+            continue;
+        }
+
+        let d = *distances.entry(current).or_insert(1);
+        if d > factor {
+            factor *= 2;
+        }
+        if d == factor {
+            sample.insert(current);
+            if let Some(sz) = quicksamplesize {
+                if sample.len() >= sz {
+                    return Ok(());
+                }
+            }
+        }
+        for p in parentsfn(current)? {
+            if let Some(revs) = revs {
+                if !revs.contains(&p) {
+                    continue;
+                }
+            }
+            distances.entry(p).or_insert(d + 1);
+            visit.push_back(p);
+        }
+    }
+    Ok(())
+}
+
+struct ParentsIterator {
+    parents: [Revision; 2],
+    cur: usize,
+}
+
+impl ParentsIterator {
+    fn graph_parents(
+        graph: &impl Graph,
+        r: Revision,
+    ) -> Result<ParentsIterator, GraphError> {
+        Ok(ParentsIterator {
+            parents: graph.parents(r)?,
+            cur: 0,
+        })
+    }
+}
+
+impl Iterator for ParentsIterator {
+    type Item = Revision;
+
+    fn next(&mut self) -> Option<Revision> {
+        if self.cur > 1 {
+            return None;
+        }
+        let rev = self.parents[self.cur];
+        self.cur += 1;
+        if rev == NULL_REVISION {
+            return self.next();
+        }
+        Some(rev)
+    }
+}
+
 impl<G: Graph + Clone> PartialDiscovery<G> {
     /// Create a PartialDiscovery object, with the intent
     /// of comparing our `::<target_heads>` revset to the contents of another
@@ -38,22 +139,89 @@
     /// If we want to make the signature more flexible,
     /// we'll have to make it a type argument of `PartialDiscovery` or a trait
     /// object since we'll keep it in the meanwhile
-    pub fn new(graph: G, target_heads: Vec<Revision>) -> Self {
+    ///
+    /// The `respect_size` boolean controls how the sampling methods
+    /// will interpret the size argument requested by the caller. If it's
+    /// `false`, they are allowed to produce a sample whose size is more
+    /// appropriate to the situation (typically bigger).
+    ///
+    /// The `randomize` boolean affects sampling, and specifically how
+    /// limiting or last-minute expanding is done:
+    ///
+    /// If `true`, both will perform random picking from `self.undecided`.
+    /// This is currently the best for actual discoveries.
+    ///
+    /// If `false`, a reproducible picking strategy is performed. This is
    /// useful for integration tests.
+ pub fn new( + graph: G, + target_heads: Vec<Revision>, + respect_size: bool, + randomize: bool, + ) -> Self { + let mut seed: [u8; 16] = [0; 16]; + if randomize { + thread_rng().fill_bytes(&mut seed); + } + Self::new_with_seed(graph, target_heads, seed, respect_size, randomize) + } + + pub fn new_with_seed( + graph: G, + target_heads: Vec<Revision>, + seed: [u8; 16], + respect_size: bool, + randomize: bool, + ) -> Self { PartialDiscovery { undecided: None, + children_cache: None, target_heads: Some(target_heads), graph: graph.clone(), common: MissingAncestors::new(graph, vec![]), missing: HashSet::new(), + rng: Rng::from_seed(seed), + respect_size: respect_size, + randomize: randomize, } } + /// Extract at most `size` random elements from sample and return them + /// as a vector + fn limit_sample( + &mut self, + mut sample: Vec<Revision>, + size: usize, + ) -> Vec<Revision> { + if !self.randomize { + sample.sort(); + sample.truncate(size); + return sample; + } + let sample_len = sample.len(); + if sample_len <= size { + return sample; + } + let rng = &mut self.rng; + let dropped_size = sample_len - size; + let limited_slice = if size < dropped_size { + sample.partial_shuffle(rng, size).0 + } else { + sample.partial_shuffle(rng, dropped_size).1 + }; + limited_slice.to_owned() + } + /// Register revisions known as being common pub fn add_common_revisions( &mut self, common: impl IntoIterator<Item = Revision>, ) -> Result<(), GraphError> { + let before_len = self.common.get_bases().len(); self.common.add_bases(common); + if self.common.get_bases().len() == before_len { + return Ok(()); + } if let Some(ref mut undecided) = self.undecided { self.common.remove_ancestors_from(undecided)?; } @@ -61,20 +229,50 @@ } /// Register revisions known as being missing + /// + /// # Performance note + /// + /// Except in the most trivial case, the first call of this method has + /// the side effect of computing `self.undecided` set for the first time, + /// and the related caches it might need for efficiency of its internal + /// computation. This is typically faster if more information is + /// available in `self.common`. Therefore, for good performance, the + /// caller should avoid calling this too early. 
     pub fn add_missing_revisions(
         &mut self,
         missing: impl IntoIterator<Item = Revision>,
     ) -> Result<(), GraphError> {
-        self.ensure_undecided()?;
-        let range = dagops::range(
-            &self.graph,
-            missing,
-            self.undecided.as_ref().unwrap().iter().cloned(),
-        )?;
+        let mut tovisit: VecDeque<Revision> = missing.into_iter().collect();
+        if tovisit.is_empty() {
+            return Ok(());
+        }
+        self.ensure_children_cache()?;
+        self.ensure_undecided()?; // for safety of possible future refactors
+        let children = self.children_cache.as_ref().unwrap();
+
+        let mut seen: HashSet<Revision> = HashSet::new();
         let undecided_mut = self.undecided.as_mut().unwrap();
-        for missrev in range {
-            self.missing.insert(missrev);
-            undecided_mut.remove(&missrev);
+        while let Some(rev) = tovisit.pop_front() {
+            if !self.missing.insert(rev) {
+                // either it's known to be missing from a previous
+                // invocation, and there's no need to iterate on its
+                // children (we know they are all missing)
+                // or it's from a previous iteration of this loop
+                // and its children have already been queued
+                continue;
+            }
+            undecided_mut.remove(&rev);
+            match children.get(&rev) {
+                None => {
+                    continue;
+                }
+                Some(this_children) => {
+                    for child in this_children.iter().cloned() {
+                        if seen.insert(child) {
+                            tovisit.push_back(child);
+                        }
+                    }
+                }
+            }
         }
         Ok(())
     }
@@ -124,12 +322,157 @@
         Ok(())
     }
 
+    fn ensure_children_cache(&mut self) -> Result<(), GraphError> {
+        if self.children_cache.is_some() {
+            return Ok(());
+        }
+        self.ensure_undecided()?;
+
+        let mut children: HashMap<Revision, Vec<Revision>> = HashMap::new();
+        for &rev in self.undecided.as_ref().unwrap() {
+            for p in ParentsIterator::graph_parents(&self.graph, rev)? {
+                children.entry(p).or_insert_with(|| Vec::new()).push(rev);
+            }
+        }
+        self.children_cache = Some(children);
+        Ok(())
+    }
+
     /// Provide statistics about the current state of the discovery process
     pub fn stats(&self) -> DiscoveryStats {
         DiscoveryStats {
             undecided: self.undecided.as_ref().map(|s| s.len()),
         }
     }
+
+    pub fn take_quick_sample(
+        &mut self,
+        headrevs: impl IntoIterator<Item = Revision>,
+        size: usize,
+    ) -> Result<Vec<Revision>, GraphError> {
+        self.ensure_undecided()?;
+        let mut sample = {
+            let undecided = self.undecided.as_ref().unwrap();
+            if undecided.len() <= size {
+                return Ok(undecided.iter().cloned().collect());
+            }
+            dagops::heads(&self.graph, undecided.iter())?
+        };
+        if sample.len() >= size {
+            return Ok(self.limit_sample(sample.into_iter().collect(), size));
+        }
+        update_sample(
+            None,
+            headrevs,
+            &mut sample,
+            |r| ParentsIterator::graph_parents(&self.graph, r),
+            Some(size),
+        )?;
+        Ok(sample.into_iter().collect())
+    }
+
+    /// Extract a sample from `self.undecided`, going from its heads and roots.
+    ///
+    /// The `size` parameter is used to avoid useless computations if
+    /// it turns out to be bigger than the whole set of undecided Revisions.
+    ///
+    /// The sample is taken by using `update_sample` from the heads, then
+    /// from the roots, working on the reverse DAG,
+    /// expressed by `self.children_cache`.
+    ///
+    /// No effort is being made to complete or limit the sample to `size`
+    /// but this method returns another interesting size that it derives
+    /// from its knowledge of the structure of the various sets, leaving
    /// to the caller the decision to use it or not.
+ fn bidirectional_sample( + &mut self, + size: usize, + ) -> Result<(HashSet<Revision>, usize), GraphError> { + self.ensure_undecided()?; + { + // we don't want to compute children_cache before this + // but doing it after extracting self.undecided takes a mutable + // ref to self while a shareable one is still active. + let undecided = self.undecided.as_ref().unwrap(); + if undecided.len() <= size { + return Ok((undecided.clone(), size)); + } + } + + self.ensure_children_cache()?; + let revs = self.undecided.as_ref().unwrap(); + let mut sample: HashSet<Revision> = revs.clone(); + + // it's possible that leveraging the children cache would be more + // efficient here + dagops::retain_heads(&self.graph, &mut sample)?; + let revsheads = sample.clone(); // was again heads(revs) in python + + // update from heads + update_sample( + Some(revs), + revsheads.iter().cloned(), + &mut sample, + |r| ParentsIterator::graph_parents(&self.graph, r), + None, + )?; + + // update from roots + let revroots: HashSet<Revision> = + dagops::roots(&self.graph, revs)?.into_iter().collect(); + let prescribed_size = max(size, min(revroots.len(), revsheads.len())); + + let children = self.children_cache.as_ref().unwrap(); + let empty_vec: Vec<Revision> = Vec::new(); + update_sample( + Some(revs), + revroots, + &mut sample, + |r| Ok(children.get(&r).unwrap_or(&empty_vec).iter().cloned()), + None, + )?; + Ok((sample, prescribed_size)) + } + + /// Fill up sample up to the wished size with random undecided Revisions. + /// + /// This is intended to be used as a last resort completion if the + /// regular sampling algorithm returns too few elements. + fn random_complete_sample( + &mut self, + sample: &mut Vec<Revision>, + size: usize, + ) { + let sample_len = sample.len(); + if size <= sample_len { + return; + } + let take_from: Vec<Revision> = self + .undecided + .as_ref() + .unwrap() + .iter() + .filter(|&r| !sample.contains(r)) + .cloned() + .collect(); + sample.extend(self.limit_sample(take_from, size - sample_len)); + } + + pub fn take_full_sample( + &mut self, + size: usize, + ) -> Result<Vec<Revision>, GraphError> { + let (sample_set, prescribed_size) = self.bidirectional_sample(size)?; + let size = if self.respect_size { + size + } else { + prescribed_size + }; + let mut sample = + self.limit_sample(sample_set.into_iter().collect(), size); + self.random_complete_sample(&mut sample, size); + Ok(sample) + } } #[cfg(test)] @@ -138,8 +481,30 @@ use crate::testing::SampleGraph; /// A PartialDiscovery as for pushing all the heads of `SampleGraph` + /// + /// To avoid actual randomness in these tests, we give it a fixed + /// random seed, but by default we'll test the random version. fn full_disco() -> PartialDiscovery<SampleGraph> { - PartialDiscovery::new(SampleGraph, vec![10, 11, 12, 13]) + PartialDiscovery::new_with_seed( + SampleGraph, + vec![10, 11, 12, 13], + [0; 16], + true, + true, + ) + } + + /// A PartialDiscovery as for pushing the 12 head of `SampleGraph` + /// + /// To avoid actual randomness in tests, we give it a fixed random seed. 
+ fn disco12() -> PartialDiscovery<SampleGraph> { + PartialDiscovery::new_with_seed( + SampleGraph, + vec![12], + [0; 16], + true, + true, + ) } fn sorted_undecided( @@ -206,4 +571,124 @@ assert_eq!(sorted_common_heads(&disco)?, vec![5, 11, 12]); Ok(()) } + + #[test] + fn test_add_missing_early_continue() -> Result<(), GraphError> { + eprintln!("test_add_missing_early_stop"); + let mut disco = full_disco(); + disco.add_common_revisions(vec![13, 3, 4])?; + disco.ensure_children_cache()?; + // 12 is grand-child of 6 through 9 + // passing them in this order maximizes the chances of the + // early continue to do the wrong thing + disco.add_missing_revisions(vec![6, 9, 12])?; + assert_eq!(sorted_undecided(&disco), vec![5, 7, 10, 11]); + assert_eq!(sorted_missing(&disco), vec![6, 9, 12]); + assert!(!disco.is_complete()); + Ok(()) + } + + #[test] + fn test_limit_sample_no_need_to() { + let sample = vec![1, 2, 3, 4]; + assert_eq!(full_disco().limit_sample(sample, 10), vec![1, 2, 3, 4]); + } + + #[test] + fn test_limit_sample_less_than_half() { + assert_eq!(full_disco().limit_sample((1..6).collect(), 2), vec![4, 2]); + } + + #[test] + fn test_limit_sample_more_than_half() { + assert_eq!(full_disco().limit_sample((1..4).collect(), 2), vec![3, 2]); + } + + #[test] + fn test_limit_sample_no_random() { + let mut disco = full_disco(); + disco.randomize = false; + assert_eq!( + disco.limit_sample(vec![1, 8, 13, 5, 7, 3], 4), + vec![1, 3, 5, 7] + ); + } + + #[test] + fn test_quick_sample_enough_undecided_heads() -> Result<(), GraphError> { + let mut disco = full_disco(); + disco.undecided = Some((1..=13).collect()); + + let mut sample_vec = disco.take_quick_sample(vec![], 4)?; + sample_vec.sort(); + assert_eq!(sample_vec, vec![10, 11, 12, 13]); + Ok(()) + } + + #[test] + fn test_quick_sample_climbing_from_12() -> Result<(), GraphError> { + let mut disco = disco12(); + disco.ensure_undecided()?; + + let mut sample_vec = disco.take_quick_sample(vec![12], 4)?; + sample_vec.sort(); + // r12's only parent is r9, whose unique grand-parent through the + // diamond shape is r4. This ends there because the distance from r4 + // to the root is only 3. 
+ assert_eq!(sample_vec, vec![4, 9, 12]); + Ok(()) + } + + #[test] + fn test_children_cache() -> Result<(), GraphError> { + let mut disco = full_disco(); + disco.ensure_children_cache()?; + + let cache = disco.children_cache.unwrap(); + assert_eq!(cache.get(&2).cloned(), Some(vec![4])); + assert_eq!(cache.get(&10).cloned(), None); + + let mut children_4 = cache.get(&4).cloned().unwrap(); + children_4.sort(); + assert_eq!(children_4, vec![5, 6, 7]); + + let mut children_7 = cache.get(&7).cloned().unwrap(); + children_7.sort(); + assert_eq!(children_7, vec![9, 11]); + + Ok(()) + } + + #[test] + fn test_complete_sample() { + let mut disco = full_disco(); + let undecided: HashSet<Revision> = + [4, 7, 9, 2, 3].iter().cloned().collect(); + disco.undecided = Some(undecided); + + let mut sample = vec![0]; + disco.random_complete_sample(&mut sample, 3); + assert_eq!(sample.len(), 3); + + let mut sample = vec![2, 4, 7]; + disco.random_complete_sample(&mut sample, 1); + assert_eq!(sample.len(), 3); + } + + #[test] + fn test_bidirectional_sample() -> Result<(), GraphError> { + let mut disco = full_disco(); + disco.undecided = Some((0..=13).into_iter().collect()); + + let (sample_set, size) = disco.bidirectional_sample(7)?; + assert_eq!(size, 7); + let mut sample: Vec<Revision> = sample_set.into_iter().collect(); + sample.sort(); + // our DAG is a bit too small for the results to be really interesting + // at least it shows that + // - we went both ways + // - we didn't take all Revisions (6 is not in the sample) + assert_eq!(sample, vec![0, 1, 2, 3, 4, 5, 7, 8, 9, 10, 11, 12, 13]); + Ok(()) + } }
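Putting the discovery pieces together: a session alternates between taking a sample and feeding back which of the sampled revisions the remote knows. A rough sketch against the crate's fixed `SampleGraph` (the common/missing answers here are invented):

    use hg::discovery::PartialDiscovery;
    use hg::testing::SampleGraph;

    fn main() {
        let mut disco =
            PartialDiscovery::new(SampleGraph, vec![10, 11, 12, 13], true, true);

        // Pretend a first exchange told us rev 5 is common and rev 12 missing.
        disco.add_common_revisions(vec![5]).unwrap();
        disco.add_missing_revisions(vec![12]).unwrap();

        if !disco.is_complete() {
            // Ask for the next batch of revisions to query the remote about.
            let sample = disco.take_full_sample(4).unwrap();
            println!(
                "undecided: {:?}, next sample: {:?}",
                disco.stats().undecided,
                sample
            );
        }
    }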
--- a/rust/hg-core/src/filepatterns.rs Sat Sep 07 14:35:21 2019 +0100 +++ b/rust/hg-core/src/filepatterns.rs Mon Sep 09 17:26:17 2019 -0400 @@ -1,3 +1,12 @@ +// filepatterns.rs +// +// Copyright 2019 Raphaël Gomès <rgomes@octobus.net> +// +// This software may be used and distributed according to the terms of the +// GNU General Public License version 2 or any later version. + +//! Handling of Mercurial-specific patterns. + use crate::{ utils::{files::get_path_from_bytes, SliceExt}, LineNumber, PatternError, PatternFileError, @@ -29,7 +38,8 @@ Regexp, /// Glob that matches at the front of the path RootGlob, - /// Glob that matches at any suffix of the path (still anchored at slashes) + /// Glob that matches at any suffix of the path (still anchored at + /// slashes) Glob, Path, RelPath, @@ -50,8 +60,8 @@ match c { b'*' => { for (source, repl) in GLOB_REPLACEMENTS { - if input.starts_with(source) { - input = &input[source.len()..]; + if let Some(rest) = input.drop_prefix(source) { + input = rest; res.extend(*repl); break; } @@ -149,43 +159,36 @@ if pattern[0] == b'^' { return pattern.to_owned(); } - let mut res = b".*".to_vec(); - res.extend(pattern); - res + [b".*", pattern].concat() } PatternSyntax::Path | PatternSyntax::RelPath => { if pattern == b"." { return vec![]; } - let mut pattern = escape_pattern(pattern); - pattern.extend(b"(?:/|$)"); - pattern + [escape_pattern(pattern).as_slice(), b"(?:/|$)"].concat() } PatternSyntax::RootFiles => { let mut res = if pattern == b"." { vec![] } else { // Pattern is a directory name. - let mut as_vec: Vec<u8> = escape_pattern(pattern); - as_vec.push(b'/'); - as_vec + [escape_pattern(pattern).as_slice(), b"/"].concat() }; // Anything after the pattern must be a non-directory. res.extend(b"[^/]+$"); res } - PatternSyntax::Glob - | PatternSyntax::RelGlob - | PatternSyntax::RootGlob => { - let mut res: Vec<u8> = vec![]; - if syntax == PatternSyntax::RelGlob { - res.extend(b"(?:|.*/)"); + PatternSyntax::RelGlob => { + let glob_re = glob_to_re(pattern); + if let Some(rest) = glob_re.drop_prefix(b"[^/]*") { + [b".*", rest, globsuffix].concat() + } else { + [b"(?:|.*/)", glob_re.as_slice(), globsuffix].concat() } - - res.extend(glob_to_re(pattern)); - res.extend(globsuffix.iter()); - res + } + PatternSyntax::Glob | PatternSyntax::RootGlob => { + [glob_to_re(pattern).as_slice(), globsuffix].concat() } } } @@ -259,8 +262,8 @@ continue; } - if line.starts_with(b"syntax:") { - let syntax = line[b"syntax:".len()..].trim(); + if let Some(syntax) = line.drop_prefix(b"syntax:") { + let syntax = syntax.trim(); if let Some(rel_syntax) = SYNTAXES.get(syntax) { current_syntax = rel_syntax; @@ -273,13 +276,14 @@ let mut line_syntax: &[u8] = ¤t_syntax; for (s, rels) in SYNTAXES.iter() { - if line.starts_with(rels) { + if let Some(rest) = line.drop_prefix(rels) { line_syntax = rels; - line = &line[rels.len()..]; + line = rest; break; - } else if line.starts_with(&[s, b":".as_ref()].concat()) { + } + if let Some(rest) = line.drop_prefix(&[s, &b":"[..]].concat()) { line_syntax = rels; - line = &line[s.len() + 1..]; + line = rest; break; } }
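
A recurring idiom in this refactoring is `concat` on an array of byte
slices, which assembles the regex bytes in a single allocation instead of
the previous create-then-extend pattern. A standalone illustration (not
from the changeset)::

    fn main() {
        let pattern: &[u8] = b"foo/bar";
        // `[a, b].concat()` flattens the two slices into one Vec<u8>,
        // replacing the "allocate a Vec, then extend it" pattern.
        let re: Vec<u8> = [pattern, &b"(?:/|$)"[..]].concat();
        assert_eq!(re, b"foo/bar(?:/|$)".to_vec());
    }
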
--- a/rust/hg-core/src/lib.rs Sat Sep 07 14:35:21 2019 +0100 +++ b/rust/hg-core/src/lib.rs Mon Sep 09 17:26:17 2019 -0400 @@ -9,10 +9,11 @@ pub mod discovery; pub mod testing; // unconditionally built, for use from integration tests pub use dirstate::{ - dirs_multiset::DirsMultiset, - parsers::{pack_dirstate, parse_dirstate}, - CopyVec, CopyVecEntry, DirsIterable, DirstateEntry, DirstateParents, - DirstateVec, + dirs_multiset::{DirsMultiset, DirsMultisetIter}, + dirstate_map::DirstateMap, + parsers::{pack_dirstate, parse_dirstate, PARENT_SIZE}, + CopyMap, CopyMapIter, DirstateEntry, DirstateParents, EntryState, + StateMap, StateMapIter, }; mod filepatterns; pub mod utils; @@ -60,6 +61,25 @@ TooLittleData, Overflow, CorruptedEntry(String), + Damaged, +} + +impl From<std::io::Error> for DirstateParseError { + fn from(e: std::io::Error) -> Self { + DirstateParseError::CorruptedEntry(e.to_string()) + } +} + +impl ToString for DirstateParseError { + fn to_string(&self) -> String { + use crate::DirstateParseError::*; + match self { + TooLittleData => "Too little data for dirstate.".to_string(), + Overflow => "Overflow in dirstate.".to_string(), + CorruptedEntry(e) => format!("Corrupted entry: {:?}.", e), + Damaged => "Dirstate appears to be damaged.".to_string(), + } + } } #[derive(Debug, PartialEq)] @@ -69,21 +89,33 @@ BadSize(usize, usize), } +impl From<std::io::Error> for DirstatePackError { + fn from(e: std::io::Error) -> Self { + DirstatePackError::CorruptedEntry(e.to_string()) + } +} #[derive(Debug, PartialEq)] pub enum DirstateMapError { PathNotFound(Vec<u8>), EmptyPath, } -impl From<std::io::Error> for DirstatePackError { - fn from(e: std::io::Error) -> Self { - DirstatePackError::CorruptedEntry(e.to_string()) +pub enum DirstateError { + Parse(DirstateParseError), + Pack(DirstatePackError), + Map(DirstateMapError), + IO(std::io::Error), +} + +impl From<DirstateParseError> for DirstateError { + fn from(e: DirstateParseError) -> Self { + DirstateError::Parse(e) } } -impl From<std::io::Error> for DirstateParseError { - fn from(e: std::io::Error) -> Self { - DirstateParseError::CorruptedEntry(e.to_string()) +impl From<DirstatePackError> for DirstateError { + fn from(e: DirstatePackError) -> Self { + DirstateError::Pack(e) } } @@ -103,3 +135,15 @@ PatternFileError::IO(e) } } + +impl From<DirstateMapError> for DirstateError { + fn from(e: DirstateMapError) -> Self { + DirstateError::Map(e) + } +} + +impl From<std::io::Error> for DirstateError { + fn from(e: std::io::Error) -> Self { + DirstateError::IO(e) + } +}
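
The new `From` impls let `?` and `into()` lift the specialized dirstate
errors into the umbrella `DirstateError`. A hedged sketch, with a
hypothetical validation function::

    fn check_parents(data: &[u8]) -> Result<(), DirstateError> {
        if data.len() < PARENT_SIZE * 2 {
            // DirstateParseError converts into DirstateError::Parse
            // through the From impl above, so `.into()` just works.
            return Err(DirstateParseError::TooLittleData.into());
        }
        // std::io::Error would convert the same way, via DirstateError::IO.
        Ok(())
    }
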
--- a/rust/hg-core/src/utils.rs Sat Sep 07 14:35:21 2019 +0100 +++ b/rust/hg-core/src/utils.rs Mon Sep 09 17:26:17 2019 -0400 @@ -1,3 +1,12 @@ +// utils module +// +// Copyright 2019 Raphaël Gomès <rgomes@octobus.net> +// +// This software may be used and distributed according to the terms of the +// GNU General Public License version 2 or any later version. + +//! Contains useful functions, traits, structs, etc. for use in core. + pub mod files; /// Replaces the `from` slice with the `to` slice inside the `buf` slice. @@ -11,8 +20,7 @@ /// assert_eq!( /// line, /// b"I love writing tests!".to_vec() -///); -/// +/// ); /// ``` pub fn replace_slice<T>(buf: &mut [T], from: &[T], to: &[T]) where @@ -32,6 +40,7 @@ fn trim_end(&self) -> &Self; fn trim_start(&self) -> &Self; fn trim(&self) -> &Self; + fn drop_prefix(&self, needle: &Self) -> Option<&Self>; } fn is_not_whitespace(c: &u8) -> bool { @@ -72,4 +81,12 @@ fn trim(&self) -> &[u8] { self.trim_start().trim_end() } + + fn drop_prefix(&self, needle: &Self) -> Option<&Self> { + if self.starts_with(needle) { + Some(&self[needle.len()..]) + } else { + None + } + } }
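
`drop_prefix` fuses the `starts_with` test and the subsequent slice into one
fallible step, which is what enables the `if let` rewrites in
`filepatterns.rs` above. A free-function copy of the helper, for a
self-contained demo::

    // Free-function equivalent of SliceExt::drop_prefix as defined above.
    fn drop_prefix<'a>(s: &'a [u8], needle: &[u8]) -> Option<&'a [u8]> {
        if s.starts_with(needle) {
            Some(&s[needle.len()..])
        } else {
            None
        }
    }

    fn main() {
        assert_eq!(drop_prefix(b"syntax: glob", b"syntax:"), Some(&b" glob"[..]));
        assert_eq!(drop_prefix(b"no prefix here", b"syntax:"), None);
    }
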
--- a/rust/hg-core/src/utils/files.rs Sat Sep 07 14:35:21 2019 +0100 +++ b/rust/hg-core/src/utils/files.rs Mon Sep 09 17:26:17 2019 -0400 @@ -1,3 +1,14 @@ +// files.rs +// +// Copyright 2019 +// Raphaël Gomès <rgomes@octobus.net>, +// Yuya Nishihara <yuya@tcha.org> +// +// This software may be used and distributed according to the terms of the +// GNU General Public License version 2 or any later version. + +//! Functions for fiddling with files. + use std::iter::FusedIterator; use std::path::Path; @@ -60,6 +71,15 @@ dirs } +/// TODO improve handling of utf8 file names. Our overall strategy for +/// filenames has to be revisited anyway, since Windows is UTF-16. +pub fn normalize_case(bytes: &[u8]) -> Vec<u8> { + #[cfg(windows)] // NTFS compares via upper() + return bytes.to_ascii_uppercase(); + #[cfg(unix)] + bytes.to_ascii_lowercase() +} + #[cfg(test)] mod tests { #[test]
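
`normalize_case` selects the platform's case-folding convention at compile
time: NTFS compares names upper-cased, while the Unix branch folds to lower
case. A usage sketch, assuming the function is in scope::

    fn main() {
        // Only the branch matching the build target is compiled in.
        #[cfg(unix)]
        assert_eq!(normalize_case(b"Foo/BAR.txt"), b"foo/bar.txt".to_vec());
        #[cfg(windows)]
        assert_eq!(normalize_case(b"Foo/BAR.txt"), b"FOO/BAR.TXT".to_vec());
    }
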
--- a/rust/hg-cpython/src/dirstate.rs Sat Sep 07 14:35:21 2019 +0100 +++ b/rust/hg-cpython/src/dirstate.rs Mon Sep 09 17:26:17 2019 -0400 @@ -9,23 +9,21 @@ //! `hg-core` package. //! //! From Python, this will be seen as `mercurial.rustext.dirstate` - +mod copymap; +mod dirs_multiset; +mod dirstate_map; +use crate::dirstate::{dirs_multiset::Dirs, dirstate_map::DirstateMap}; use cpython::{ - exc, ObjectProtocol, PyBytes, PyDict, PyErr, PyInt, PyModule, PyObject, - PyResult, PySequence, PyTuple, Python, PythonObject, ToPyObject, + exc, PyBytes, PyDict, PyErr, PyModule, PyObject, PyResult, PySequence, + Python, }; -use hg::{ - pack_dirstate, parse_dirstate, CopyVecEntry, DirsIterable, DirsMultiset, - DirstateEntry, DirstateMapError, DirstatePackError, DirstateParents, - DirstateParseError, DirstateVec, -}; +use hg::{DirstateEntry, DirstateParseError, EntryState, StateMap}; use libc::{c_char, c_int}; #[cfg(feature = "python27")] use python27_sys::PyCapsule_Import; #[cfg(feature = "python3")] use python3_sys::PyCapsule_Import; -use std::cell::RefCell; -use std::collections::HashMap; +use std::convert::TryFrom; use std::ffi::CStr; use std::mem::transmute; @@ -45,7 +43,9 @@ /// This is largely a copy/paste from cindex.rs, pending the merge of a /// `py_capsule_fn!` macro in the rust-cpython project: /// https://github.com/dgrunwald/rust-cpython/pull/169 -fn decapsule_make_dirstate_tuple(py: Python) -> PyResult<MakeDirstateTupleFn> { +pub fn decapsule_make_dirstate_tuple( + py: Python, +) -> PyResult<MakeDirstateTupleFn> { unsafe { let caps_name = CStr::from_bytes_with_nul_unchecked( b"mercurial.cext.parsers.make_dirstate_tuple_CAPI\0", @@ -58,61 +58,17 @@ } } -fn parse_dirstate_wrapper( - py: Python, - dmap: PyDict, - copymap: PyDict, - st: PyBytes, -) -> PyResult<PyTuple> { - match parse_dirstate(st.data(py)) { - Ok((parents, dirstate_vec, copies)) => { - for (filename, entry) in dirstate_vec { - dmap.set_item( - py, - PyBytes::new(py, &filename[..]), - decapsule_make_dirstate_tuple(py)?( - entry.state as c_char, - entry.mode, - entry.size, - entry.mtime, - ), - )?; - } - for CopyVecEntry { path, copy_path } in copies { - copymap.set_item( - py, - PyBytes::new(py, path), - PyBytes::new(py, copy_path), - )?; - } - Ok((PyBytes::new(py, parents.p1), PyBytes::new(py, parents.p2)) - .to_py_object(py)) - } - Err(e) => Err(PyErr::new::<exc::ValueError, _>( - py, - match e { - DirstateParseError::TooLittleData => { - "too little data for parents".to_string() - } - DirstateParseError::Overflow => { - "overflow in dirstate".to_string() - } - DirstateParseError::CorruptedEntry(e) => e, - }, - )), - } -} - -fn extract_dirstate_vec( - py: Python, - dmap: &PyDict, -) -> Result<DirstateVec, PyErr> { +pub fn extract_dirstate(py: Python, dmap: &PyDict) -> Result<StateMap, PyErr> { dmap.items(py) .iter() .map(|(filename, stats)| { let stats = stats.extract::<PySequence>(py)?; let state = stats.get_item(py, 0)?.extract::<PyBytes>(py)?; - let state = state.data(py)[0] as i8; + let state = EntryState::try_from(state.data(py)[0]).map_err( + |e: DirstateParseError| { + PyErr::new::<exc::ValueError, _>(py, e.to_string()) + }, + )?; let mode = stats.get_item(py, 1)?.extract(py)?; let size = stats.get_item(py, 2)?.extract(py)?; let mtime = stats.get_item(py, 3)?.extract(py)?; @@ -131,167 +87,6 @@ .collect() } -fn pack_dirstate_wrapper( - py: Python, - dmap: PyDict, - copymap: PyDict, - pl: PyTuple, - now: PyInt, -) -> PyResult<PyBytes> { - let p1 = pl.get_item(py, 0).extract::<PyBytes>(py)?; - let p1: &[u8] = 
p1.data(py); - let p2 = pl.get_item(py, 1).extract::<PyBytes>(py)?; - let p2: &[u8] = p2.data(py); - - let dirstate_vec = extract_dirstate_vec(py, &dmap)?; - - let copies: Result<HashMap<Vec<u8>, Vec<u8>>, PyErr> = copymap - .items(py) - .iter() - .map(|(key, value)| { - Ok(( - key.extract::<PyBytes>(py)?.data(py).to_owned(), - value.extract::<PyBytes>(py)?.data(py).to_owned(), - )) - }) - .collect(); - - match pack_dirstate( - &dirstate_vec, - &copies?, - DirstateParents { p1, p2 }, - now.as_object().extract::<i32>(py)?, - ) { - Ok((packed, new_dirstate_vec)) => { - for ( - filename, - DirstateEntry { - state, - mode, - size, - mtime, - }, - ) in new_dirstate_vec - { - dmap.set_item( - py, - PyBytes::new(py, &filename[..]), - decapsule_make_dirstate_tuple(py)?( - state as c_char, - mode, - size, - mtime, - ), - )?; - } - Ok(PyBytes::new(py, &packed)) - } - Err(error) => Err(PyErr::new::<exc::ValueError, _>( - py, - match error { - DirstatePackError::CorruptedParent => { - "expected a 20-byte hash".to_string() - } - DirstatePackError::CorruptedEntry(e) => e, - DirstatePackError::BadSize(expected, actual) => { - format!("bad dirstate size: {} != {}", actual, expected) - } - }, - )), - } -} - -py_class!(pub class Dirs |py| { - data dirs_map: RefCell<DirsMultiset>; - - // `map` is either a `dict` or a flat iterator (usually a `set`, sometimes - // a `list`) - def __new__( - _cls, - map: PyObject, - skip: Option<PyObject> = None - ) -> PyResult<Self> { - let mut skip_state: Option<i8> = None; - if let Some(skip) = skip { - skip_state = Some(skip.extract::<PyBytes>(py)?.data(py)[0] as i8); - } - let dirs_map; - - if let Ok(map) = map.cast_as::<PyDict>(py) { - let dirstate_vec = extract_dirstate_vec(py, &map)?; - dirs_map = DirsMultiset::new( - DirsIterable::Dirstate(dirstate_vec), - skip_state, - ) - } else { - let map: Result<Vec<Vec<u8>>, PyErr> = map - .iter(py)? - .map(|o| Ok(o?.extract::<PyBytes>(py)?.data(py).to_owned())) - .collect(); - dirs_map = DirsMultiset::new( - DirsIterable::Manifest(map?), - skip_state, - ) - } - - Self::create_instance(py, RefCell::new(dirs_map)) - } - - def addpath(&self, path: PyObject) -> PyResult<PyObject> { - self.dirs_map(py).borrow_mut().add_path( - path.extract::<PyBytes>(py)?.data(py), - ); - Ok(py.None()) - } - - def delpath(&self, path: PyObject) -> PyResult<PyObject> { - self.dirs_map(py).borrow_mut().delete_path( - path.extract::<PyBytes>(py)?.data(py), - ) - .and(Ok(py.None())) - .or_else(|e| { - match e { - DirstateMapError::PathNotFound(_p) => { - Err(PyErr::new::<exc::ValueError, _>( - py, - "expected a value, found none".to_string(), - )) - } - DirstateMapError::EmptyPath => { - Ok(py.None()) - } - } - }) - } - - // This is really inefficient on top of being ugly, but it's an easy way - // of having it work to continue working on the rest of the module - // hopefully bypassing Python entirely pretty soon. 
- def __iter__(&self) -> PyResult<PyObject> { - let dict = PyDict::new(py); - - for (key, value) in self.dirs_map(py).borrow().iter() { - dict.set_item( - py, - PyBytes::new(py, &key[..]), - value.to_py_object(py), - )?; - } - - let locals = PyDict::new(py); - locals.set_item(py, "obj", dict)?; - - py.eval("iter(obj)", None, Some(&locals)) - } - - def __contains__(&self, item: PyObject) -> PyResult<bool> { - Ok(self - .dirs_map(py) - .borrow() - .contains_key(item.extract::<PyBytes>(py)?.data(py).as_ref())) - } -}); - /// Create the module, with `__package__` given from parent pub fn init_module(py: Python, package: &str) -> PyResult<PyModule> { let dotted_name = &format!("{}.dirstate", package); @@ -299,29 +94,9 @@ m.add(py, "__package__", package)?; m.add(py, "__doc__", "Dirstate - Rust implementation")?; - m.add( - py, - "parse_dirstate", - py_fn!( - py, - parse_dirstate_wrapper(dmap: PyDict, copymap: PyDict, st: PyBytes) - ), - )?; - m.add( - py, - "pack_dirstate", - py_fn!( - py, - pack_dirstate_wrapper( - dmap: PyDict, - copymap: PyDict, - pl: PyTuple, - now: PyInt - ) - ), - )?; m.add_class::<Dirs>(py)?; + m.add_class::<DirstateMap>(py)?; let sys = PyModule::import(py, "sys")?; let sys_modules: PyDict = sys.get(py, "modules")?.extract(py)?;
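
`extract_dirstate` now converts the state byte through `EntryState::try_from`
instead of a bare `as i8` cast, so invalid bytes surface as errors. A
self-contained sketch of such a conversion; the variant letters follow
Mercurial's dirstate states, but the real enum lives in hg-core and may
differ in detail::

    use std::convert::TryFrom;

    #[derive(Debug, PartialEq)]
    enum EntryState {
        Normal,  // b'n'
        Added,   // b'a'
        Removed, // b'r'
        Merged,  // b'm'
    }

    impl TryFrom<u8> for EntryState {
        type Error = String;

        fn try_from(value: u8) -> Result<Self, Self::Error> {
            match value {
                b'n' => Ok(EntryState::Normal),
                b'a' => Ok(EntryState::Added),
                b'r' => Ok(EntryState::Removed),
                b'm' => Ok(EntryState::Merged),
                _ => Err(format!("unknown state byte: {}", value)),
            }
        }
    }

    fn main() {
        assert_eq!(EntryState::try_from(b'n'), Ok(EntryState::Normal));
        assert!(EntryState::try_from(b'x').is_err());
    }
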
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/rust/hg-cpython/src/dirstate/copymap.rs Mon Sep 09 17:26:17 2019 -0400 @@ -0,0 +1,115 @@ +// copymap.rs +// +// Copyright 2019 Raphaël Gomès <rgomes@octobus.net> +// +// This software may be used and distributed according to the terms of the +// GNU General Public License version 2 or any later version. + +//! Bindings for `hg::dirstate::dirstate_map::CopyMap` provided by the +//! `hg-core` package. + +use cpython::{PyBytes, PyClone, PyDict, PyObject, PyResult, Python}; +use std::cell::RefCell; + +use crate::dirstate::dirstate_map::{DirstateMap, DirstateMapLeakedRef}; +use hg::CopyMapIter; + +py_class!(pub class CopyMap |py| { + data dirstate_map: DirstateMap; + + def __getitem__(&self, key: PyObject) -> PyResult<PyBytes> { + (*self.dirstate_map(py)).copymapgetitem(py, key) + } + + def __len__(&self) -> PyResult<usize> { + self.dirstate_map(py).copymaplen(py) + } + + def __contains__(&self, key: PyObject) -> PyResult<bool> { + self.dirstate_map(py).copymapcontains(py, key) + } + + def get( + &self, + key: PyObject, + default: Option<PyObject> = None + ) -> PyResult<Option<PyObject>> { + self.dirstate_map(py).copymapget(py, key, default) + } + + def pop( + &self, + key: PyObject, + default: Option<PyObject> = None + ) -> PyResult<Option<PyObject>> { + self.dirstate_map(py).copymappop(py, key, default) + } + + def __iter__(&self) -> PyResult<CopyMapKeysIterator> { + self.dirstate_map(py).copymapiter(py) + } + + // Python's `dict()` builtin works with either a subclass of dict + // or an abstract mapping. Said mapping needs to implement `__getitem__` + // and `keys`. + def keys(&self) -> PyResult<CopyMapKeysIterator> { + self.dirstate_map(py).copymapiter(py) + } + + def items(&self) -> PyResult<CopyMapItemsIterator> { + self.dirstate_map(py).copymapitemsiter(py) + } + + def iteritems(&self) -> PyResult<CopyMapItemsIterator> { + self.dirstate_map(py).copymapitemsiter(py) + } + + def __setitem__( + &self, + key: PyObject, + item: PyObject + ) -> PyResult<()> { + self.dirstate_map(py).copymapsetitem(py, key, item)?; + Ok(()) + } + + def copy(&self) -> PyResult<PyDict> { + self.dirstate_map(py).copymapcopy(py) + } + +}); + +impl CopyMap { + pub fn from_inner(py: Python, dm: DirstateMap) -> PyResult<Self> { + Self::create_instance(py, dm) + } + fn translate_key( + py: Python, + res: (&Vec<u8>, &Vec<u8>), + ) -> PyResult<Option<PyBytes>> { + Ok(Some(PyBytes::new(py, res.0))) + } + fn translate_key_value( + py: Python, + res: (&Vec<u8>, &Vec<u8>), + ) -> PyResult<Option<(PyBytes, PyBytes)>> { + let (k, v) = res; + Ok(Some((PyBytes::new(py, k), PyBytes::new(py, v)))) + } +} + +py_shared_iterator!( + CopyMapKeysIterator, + DirstateMapLeakedRef, + CopyMapIter<'static>, + CopyMap::translate_key, + Option<PyBytes> +); + +py_shared_iterator!( + CopyMapItemsIterator, + DirstateMapLeakedRef, + CopyMapIter<'static>, + CopyMap::translate_key_value, + Option<(PyBytes, PyBytes)> +);
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/rust/hg-cpython/src/dirstate/dirs_multiset.rs Mon Sep 09 17:26:17 2019 -0400 @@ -0,0 +1,130 @@ +// dirs_multiset.rs +// +// Copyright 2019 Raphaël Gomès <rgomes@octobus.net> +// +// This software may be used and distributed according to the terms of the +// GNU General Public License version 2 or any later version. + +//! Bindings for the `hg::dirstate::dirs_multiset` file provided by the +//! `hg-core` package. + +use std::cell::RefCell; +use std::convert::TryInto; + +use cpython::{ + exc, ObjectProtocol, PyBytes, PyClone, PyDict, PyErr, PyObject, PyResult, + Python, +}; + +use crate::dirstate::extract_dirstate; +use crate::ref_sharing::{PySharedRefCell, PySharedState}; +use hg::{ + DirsMultiset, DirsMultisetIter, DirstateMapError, DirstateParseError, + EntryState, +}; + +py_class!(pub class Dirs |py| { + data inner: PySharedRefCell<DirsMultiset>; + data py_shared_state: PySharedState; + + // `map` is either a `dict` or a flat iterator (usually a `set`, sometimes + // a `list`) + def __new__( + _cls, + map: PyObject, + skip: Option<PyObject> = None + ) -> PyResult<Self> { + let mut skip_state: Option<EntryState> = None; + if let Some(skip) = skip { + skip_state = Some( + skip.extract::<PyBytes>(py)?.data(py)[0] + .try_into() + .map_err(|e: DirstateParseError| { + PyErr::new::<exc::ValueError, _>(py, e.to_string()) + })?, + ); + } + let inner = if let Ok(map) = map.cast_as::<PyDict>(py) { + let dirstate = extract_dirstate(py, &map)?; + DirsMultiset::from_dirstate(&dirstate, skip_state) + } else { + let map: Result<Vec<Vec<u8>>, PyErr> = map + .iter(py)? + .map(|o| Ok(o?.extract::<PyBytes>(py)?.data(py).to_owned())) + .collect(); + DirsMultiset::from_manifest(&map?) + }; + + Self::create_instance( + py, + PySharedRefCell::new(inner), + PySharedState::default() + ) + } + + def addpath(&self, path: PyObject) -> PyResult<PyObject> { + self.borrow_mut(py)?.add_path( + path.extract::<PyBytes>(py)?.data(py), + ); + Ok(py.None()) + } + + def delpath(&self, path: PyObject) -> PyResult<PyObject> { + self.borrow_mut(py)?.delete_path( + path.extract::<PyBytes>(py)?.data(py), + ) + .and(Ok(py.None())) + .or_else(|e| { + match e { + DirstateMapError::PathNotFound(_p) => { + Err(PyErr::new::<exc::ValueError, _>( + py, + "expected a value, found none".to_string(), + )) + } + DirstateMapError::EmptyPath => { + Ok(py.None()) + } + } + }) + } + def __iter__(&self) -> PyResult<DirsMultisetKeysIterator> { + let (leak_handle, leaked_ref) = unsafe { self.leak_immutable(py)? }; + DirsMultisetKeysIterator::from_inner( + py, + leak_handle, + leaked_ref.iter(), + ) + } + + def __contains__(&self, item: PyObject) -> PyResult<bool> { + Ok(self + .inner(py) + .borrow() + .contains(item.extract::<PyBytes>(py)?.data(py).as_ref())) + } +}); + +py_shared_ref!(Dirs, DirsMultiset, inner, DirsMultisetLeakedRef,); + +impl Dirs { + pub fn from_inner(py: Python, d: DirsMultiset) -> PyResult<Self> { + Self::create_instance( + py, + PySharedRefCell::new(d), + PySharedState::default(), + ) + } + + fn translate_key(py: Python, res: &Vec<u8>) -> PyResult<Option<PyBytes>> { + Ok(Some(PyBytes::new(py, res))) + } +} + +py_shared_iterator!( + DirsMultisetKeysIterator, + DirsMultisetLeakedRef, + DirsMultisetIter<'static>, + Dirs::translate_key, + Option<PyBytes> +);
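
The binding builds its inner value with `DirsMultiset::from_dirstate` or
`DirsMultiset::from_manifest` and answers membership through `contains`. A
hedged usage sketch of that hg-core API, with invented paths::

    fn main() {
        let files: Vec<Vec<u8>> = vec![
            b"a/b/file1".to_vec(),
            b"a/file2".to_vec(),
        ];
        let dirs = DirsMultiset::from_manifest(&files);
        // Every ancestor directory of a tracked file is counted.
        assert!(dirs.contains(&b"a"[..]));
        assert!(dirs.contains(&b"a/b"[..]));
    }
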
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/rust/hg-cpython/src/dirstate/dirstate_map.rs Mon Sep 09 17:26:17 2019 -0400 @@ -0,0 +1,509 @@ +// dirstate_map.rs +// +// Copyright 2019 Raphaël Gomès <rgomes@octobus.net> +// +// This software may be used and distributed according to the terms of the +// GNU General Public License version 2 or any later version. + +//! Bindings for the `hg::dirstate::dirstate_map` file provided by the +//! `hg-core` package. + +use std::cell::RefCell; +use std::convert::TryInto; +use std::time::Duration; + +use cpython::{ + exc, ObjectProtocol, PyBool, PyBytes, PyClone, PyDict, PyErr, PyObject, + PyResult, PyTuple, Python, PythonObject, ToPyObject, +}; +use libc::c_char; + +use crate::{ + dirstate::copymap::{CopyMap, CopyMapItemsIterator, CopyMapKeysIterator}, + dirstate::{decapsule_make_dirstate_tuple, dirs_multiset::Dirs}, + ref_sharing::{PySharedRefCell, PySharedState}, +}; +use hg::{ + DirsMultiset, DirstateEntry, DirstateMap as RustDirstateMap, + DirstateParents, DirstateParseError, EntryState, StateMapIter, + PARENT_SIZE, +}; + +// TODO +// This object needs to share references to multiple members of its Rust +// inner struct, namely `copy_map`, `dirs` and `all_dirs`. +// Right now `CopyMap` is done, but it needs to have an explicit reference +// to `RustDirstateMap` which itself needs to have an encapsulation for +// every method in `CopyMap` (copymapcopy, etc.). +// This is ugly and hard to maintain. +// The same logic applies to `dirs` and `all_dirs`, however the `Dirs` +// `py_class!` is already implemented and does not mention +// `RustDirstateMap`, rightfully so. +// All attributes also have to have a separate refcount data attribute for +// leaks, with all methods that go along for reference sharing. +py_class!(pub class DirstateMap |py| { + data inner: PySharedRefCell<RustDirstateMap>; + data py_shared_state: PySharedState; + + def __new__(_cls, _root: PyObject) -> PyResult<Self> { + let inner = RustDirstateMap::default(); + Self::create_instance( + py, + PySharedRefCell::new(inner), + PySharedState::default() + ) + } + + def clear(&self) -> PyResult<PyObject> { + self.borrow_mut(py)?.clear(); + Ok(py.None()) + } + + def get( + &self, + key: PyObject, + default: Option<PyObject> = None + ) -> PyResult<Option<PyObject>> { + let key = key.extract::<PyBytes>(py)?; + match self.inner(py).borrow().get(key.data(py)) { + Some(entry) => { + // Explicitly go through u8 first, then cast to + // platform-specific `c_char`. + let state: u8 = entry.state.into(); + Ok(Some(decapsule_make_dirstate_tuple(py)?( + state as c_char, + entry.mode, + entry.size, + entry.mtime, + ))) + }, + None => Ok(default) + } + } + + def addfile( + &self, + f: PyObject, + oldstate: PyObject, + state: PyObject, + mode: PyObject, + size: PyObject, + mtime: PyObject + ) -> PyResult<PyObject> { + self.borrow_mut(py)?.add_file( + f.extract::<PyBytes>(py)?.data(py), + oldstate.extract::<PyBytes>(py)?.data(py)[0] + .try_into() + .map_err(|e: DirstateParseError| { + PyErr::new::<exc::ValueError, _>(py, e.to_string()) + })?, + DirstateEntry { + state: state.extract::<PyBytes>(py)?.data(py)[0] + .try_into() + .map_err(|e: DirstateParseError| { + PyErr::new::<exc::ValueError, _>(py, e.to_string()) + })?, + mode: mode.extract(py)?, + size: size.extract(py)?, + mtime: mtime.extract(py)?, + }, + ); + Ok(py.None()) + } + + def removefile( + &self, + f: PyObject, + oldstate: PyObject, + size: PyObject + ) -> PyResult<PyObject> { + self.borrow_mut(py)? 
+ .remove_file( + f.extract::<PyBytes>(py)?.data(py), + oldstate.extract::<PyBytes>(py)?.data(py)[0] + .try_into() + .map_err(|e: DirstateParseError| { + PyErr::new::<exc::ValueError, _>(py, e.to_string()) + })?, + size.extract(py)?, + ) + .or_else(|_| { + Err(PyErr::new::<exc::OSError, _>( + py, + "Dirstate error".to_string(), + )) + })?; + Ok(py.None()) + } + + def dropfile( + &self, + f: PyObject, + oldstate: PyObject + ) -> PyResult<PyBool> { + self.borrow_mut(py)? + .drop_file( + f.extract::<PyBytes>(py)?.data(py), + oldstate.extract::<PyBytes>(py)?.data(py)[0] + .try_into() + .map_err(|e: DirstateParseError| { + PyErr::new::<exc::ValueError, _>(py, e.to_string()) + })?, + ) + .and_then(|b| Ok(b.to_py_object(py))) + .or_else(|_| { + Err(PyErr::new::<exc::OSError, _>( + py, + "Dirstate error".to_string(), + )) + }) + } + + def clearambiguoustimes( + &self, + files: PyObject, + now: PyObject + ) -> PyResult<PyObject> { + let files: PyResult<Vec<Vec<u8>>> = files + .iter(py)? + .map(|filename| { + Ok(filename?.extract::<PyBytes>(py)?.data(py).to_owned()) + }) + .collect(); + self.borrow_mut(py)? + .clear_ambiguous_times(files?, now.extract(py)?); + Ok(py.None()) + } + + // TODO share the reference + def nonnormalentries(&self) -> PyResult<PyObject> { + let (non_normal, other_parent) = + self.inner(py).borrow().non_normal_other_parent_entries(); + + let locals = PyDict::new(py); + locals.set_item( + py, + "non_normal", + non_normal + .iter() + .map(|v| PyBytes::new(py, &v)) + .collect::<Vec<PyBytes>>() + .to_py_object(py), + )?; + locals.set_item( + py, + "other_parent", + other_parent + .iter() + .map(|v| PyBytes::new(py, &v)) + .collect::<Vec<PyBytes>>() + .to_py_object(py), + )?; + + py.eval("set(non_normal), set(other_parent)", None, Some(&locals)) + } + + def hastrackeddir(&self, d: PyObject) -> PyResult<PyBool> { + let d = d.extract::<PyBytes>(py)?; + Ok(self.borrow_mut(py)? + .has_tracked_dir(d.data(py)) + .to_py_object(py)) + } + + def hasdir(&self, d: PyObject) -> PyResult<PyBool> { + let d = d.extract::<PyBytes>(py)?; + Ok(self.borrow_mut(py)? + .has_dir(d.data(py)) + .to_py_object(py)) + } + + def parents(&self, st: PyObject) -> PyResult<PyTuple> { + self.borrow_mut(py)? + .parents(st.extract::<PyBytes>(py)?.data(py)) + .and_then(|d| { + Ok((PyBytes::new(py, &d.p1), PyBytes::new(py, &d.p2)) + .to_py_object(py)) + }) + .or_else(|_| { + Err(PyErr::new::<exc::OSError, _>( + py, + "Dirstate error".to_string(), + )) + }) + } + + def setparents(&self, p1: PyObject, p2: PyObject) -> PyResult<PyObject> { + let p1 = extract_node_id(py, &p1)?; + let p2 = extract_node_id(py, &p2)?; + + self.borrow_mut(py)? + .set_parents(&DirstateParents { p1, p2 }); + Ok(py.None()) + } + + def read(&self, st: PyObject) -> PyResult<Option<PyObject>> { + match self.borrow_mut(py)? 
+ .read(st.extract::<PyBytes>(py)?.data(py)) + { + Ok(Some(parents)) => Ok(Some( + (PyBytes::new(py, &parents.p1), PyBytes::new(py, &parents.p2)) + .to_py_object(py) + .into_object(), + )), + Ok(None) => Ok(Some(py.None())), + Err(_) => Err(PyErr::new::<exc::OSError, _>( + py, + "Dirstate error".to_string(), + )), + } + } + def write( + &self, + p1: PyObject, + p2: PyObject, + now: PyObject + ) -> PyResult<PyBytes> { + let now = Duration::new(now.extract(py)?, 0); + let parents = DirstateParents { + p1: extract_node_id(py, &p1)?, + p2: extract_node_id(py, &p2)?, + }; + + match self.borrow_mut(py)?.pack(parents, now) { + Ok(packed) => Ok(PyBytes::new(py, &packed)), + Err(_) => Err(PyErr::new::<exc::OSError, _>( + py, + "Dirstate error".to_string(), + )), + } + } + + def filefoldmapasdict(&self) -> PyResult<PyDict> { + let dict = PyDict::new(py); + for (key, value) in + self.borrow_mut(py)?.build_file_fold_map().iter() + { + dict.set_item(py, key, value)?; + } + Ok(dict) + } + + def __len__(&self) -> PyResult<usize> { + Ok(self.inner(py).borrow().len()) + } + + def __contains__(&self, key: PyObject) -> PyResult<bool> { + let key = key.extract::<PyBytes>(py)?; + Ok(self.inner(py).borrow().contains_key(key.data(py))) + } + + def __getitem__(&self, key: PyObject) -> PyResult<PyObject> { + let key = key.extract::<PyBytes>(py)?; + let key = key.data(py); + match self.inner(py).borrow().get(key) { + Some(entry) => { + // Explicitly go through u8 first, then cast to + // platform-specific `c_char`. + let state: u8 = entry.state.into(); + Ok(decapsule_make_dirstate_tuple(py)?( + state as c_char, + entry.mode, + entry.size, + entry.mtime, + )) + }, + None => Err(PyErr::new::<exc::KeyError, _>( + py, + String::from_utf8_lossy(key), + )), + } + } + + def keys(&self) -> PyResult<DirstateMapKeysIterator> { + let (leak_handle, leaked_ref) = unsafe { self.leak_immutable(py)? }; + DirstateMapKeysIterator::from_inner( + py, + leak_handle, + leaked_ref.iter(), + ) + } + + def items(&self) -> PyResult<DirstateMapItemsIterator> { + let (leak_handle, leaked_ref) = unsafe { self.leak_immutable(py)? }; + DirstateMapItemsIterator::from_inner( + py, + leak_handle, + leaked_ref.iter(), + ) + } + + def __iter__(&self) -> PyResult<DirstateMapKeysIterator> { + let (leak_handle, leaked_ref) = unsafe { self.leak_immutable(py)? 
}; + DirstateMapKeysIterator::from_inner( + py, + leak_handle, + leaked_ref.iter(), + ) + } + + def getdirs(&self) -> PyResult<Dirs> { + // TODO don't copy, share the reference + self.borrow_mut(py)?.set_dirs(); + Dirs::from_inner( + py, + DirsMultiset::from_dirstate( + &self.inner(py).borrow(), + Some(EntryState::Removed), + ), + ) + } + def getalldirs(&self) -> PyResult<Dirs> { + // TODO don't copy, share the reference + self.borrow_mut(py)?.set_all_dirs(); + Dirs::from_inner( + py, + DirsMultiset::from_dirstate( + &self.inner(py).borrow(), + None, + ), + ) + } + + // TODO all copymap* methods, see docstring above + def copymapcopy(&self) -> PyResult<PyDict> { + let dict = PyDict::new(py); + for (key, value) in self.inner(py).borrow().copy_map.iter() { + dict.set_item(py, PyBytes::new(py, key), PyBytes::new(py, value))?; + } + Ok(dict) + } + + def copymapgetitem(&self, key: PyObject) -> PyResult<PyBytes> { + let key = key.extract::<PyBytes>(py)?; + match self.inner(py).borrow().copy_map.get(key.data(py)) { + Some(copy) => Ok(PyBytes::new(py, copy)), + None => Err(PyErr::new::<exc::KeyError, _>( + py, + String::from_utf8_lossy(key.data(py)), + )), + } + } + def copymap(&self) -> PyResult<CopyMap> { + CopyMap::from_inner(py, self.clone_ref(py)) + } + + def copymaplen(&self) -> PyResult<usize> { + Ok(self.inner(py).borrow().copy_map.len()) + } + def copymapcontains(&self, key: PyObject) -> PyResult<bool> { + let key = key.extract::<PyBytes>(py)?; + Ok(self.inner(py).borrow().copy_map.contains_key(key.data(py))) + } + def copymapget( + &self, + key: PyObject, + default: Option<PyObject> + ) -> PyResult<Option<PyObject>> { + let key = key.extract::<PyBytes>(py)?; + match self.inner(py).borrow().copy_map.get(key.data(py)) { + Some(copy) => Ok(Some(PyBytes::new(py, copy).into_object())), + None => Ok(default), + } + } + def copymapsetitem( + &self, + key: PyObject, + value: PyObject + ) -> PyResult<PyObject> { + let key = key.extract::<PyBytes>(py)?; + let value = value.extract::<PyBytes>(py)?; + self.borrow_mut(py)? + .copy_map + .insert(key.data(py).to_vec(), value.data(py).to_vec()); + Ok(py.None()) + } + def copymappop( + &self, + key: PyObject, + default: Option<PyObject> + ) -> PyResult<Option<PyObject>> { + let key = key.extract::<PyBytes>(py)?; + match self.borrow_mut(py)?.copy_map.remove(key.data(py)) { + Some(_) => Ok(None), + None => Ok(default), + } + } + + def copymapiter(&self) -> PyResult<CopyMapKeysIterator> { + let (leak_handle, leaked_ref) = unsafe { self.leak_immutable(py)? }; + CopyMapKeysIterator::from_inner( + py, + leak_handle, + leaked_ref.copy_map.iter(), + ) + } + + def copymapitemsiter(&self) -> PyResult<CopyMapItemsIterator> { + let (leak_handle, leaked_ref) = unsafe { self.leak_immutable(py)? }; + CopyMapItemsIterator::from_inner( + py, + leak_handle, + leaked_ref.copy_map.iter(), + ) + } + +}); + +impl DirstateMap { + fn translate_key( + py: Python, + res: (&Vec<u8>, &DirstateEntry), + ) -> PyResult<Option<PyBytes>> { + Ok(Some(PyBytes::new(py, res.0))) + } + fn translate_key_value( + py: Python, + res: (&Vec<u8>, &DirstateEntry), + ) -> PyResult<Option<(PyBytes, PyObject)>> { + let (f, entry) = res; + + // Explicitly go through u8 first, then cast to + // platform-specific `c_char`. 
+ let state: u8 = entry.state.into(); + Ok(Some(( + PyBytes::new(py, f), + decapsule_make_dirstate_tuple(py)?( + state as c_char, + entry.mode, + entry.size, + entry.mtime, + ), + ))) + } +} + +py_shared_ref!(DirstateMap, RustDirstateMap, inner, DirstateMapLeakedRef,); + +py_shared_iterator!( + DirstateMapKeysIterator, + DirstateMapLeakedRef, + StateMapIter<'static>, + DirstateMap::translate_key, + Option<PyBytes> +); + +py_shared_iterator!( + DirstateMapItemsIterator, + DirstateMapLeakedRef, + StateMapIter<'static>, + DirstateMap::translate_key_value, + Option<(PyBytes, PyObject)> +); + +fn extract_node_id(py: Python, obj: &PyObject) -> PyResult<[u8; PARENT_SIZE]> { + let bytes = obj.extract::<PyBytes>(py)?; + match bytes.data(py).try_into() { + Ok(s) => Ok(s), + Err(e) => Err(PyErr::new::<exc::ValueError, _>(py, e.to_string())), + } +}
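
The recurring "go through `u8` first" comment is about portability: `c_char`
is `i8` on some platforms and `u8` on others, so fixing the enum's numeric
value via its `Into<u8>` impl before the platform-dependent cast keeps the
result well defined. A minimal illustration, assuming the `libc` crate as
this file does::

    fn state_to_c_char(state: u8) -> libc::c_char {
        // The u8 -> c_char cast is bit-preserving whether c_char is i8
        // or u8; the explicit u8 step is what pins down the enum's value.
        state as libc::c_char
    }

    fn main() {
        assert_eq!(state_to_c_char(b'n') as u8, b'n');
    }
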
--- a/rust/hg-cpython/src/discovery.rs Sat Sep 07 14:35:21 2019 +0100 +++ b/rust/hg-cpython/src/discovery.rs Mon Sep 09 17:26:17 2019 -0400 @@ -18,7 +18,7 @@ exceptions::GraphError, }; use cpython::{ - ObjectProtocol, PyDict, PyModule, PyObject, PyResult, Python, + ObjectProtocol, PyDict, PyModule, PyObject, PyResult, PyTuple, Python, PythonObject, ToPyObject, }; use hg::discovery::PartialDiscovery as CorePartialDiscovery; @@ -29,16 +29,24 @@ py_class!(pub class PartialDiscovery |py| { data inner: RefCell<Box<CorePartialDiscovery<Index>>>; + // `_respectsize` is currently only here to replicate the Python API and + // will be used in future patches inside methods that are yet to be + // implemented. def __new__( _cls, - index: PyObject, - targetheads: PyObject + repo: PyObject, + targetheads: PyObject, + respectsize: bool, + randomize: bool = true ) -> PyResult<PartialDiscovery> { + let index = repo.getattr(py, "changelog")?.getattr(py, "index")?; Self::create_instance( py, RefCell::new(Box::new(CorePartialDiscovery::new( Index::new(py, index)?, rev_pyiter_collect(py, &targetheads)?, + respectsize, + randomize, ))) ) } @@ -105,6 +113,32 @@ .map_err(|e| GraphError::pynew(py, e))? ) } + + def takefullsample(&self, _headrevs: PyObject, + size: usize) -> PyResult<PyObject> { + let mut inner = self.inner(py).borrow_mut(); + let sample = inner.take_full_sample(size) + .map_err(|e| GraphError::pynew(py, e))?; + let as_vec: Vec<PyObject> = sample + .iter() + .map(|rev| rev.to_py_object(py).into_object()) + .collect(); + Ok(PyTuple::new(py, as_vec.as_slice()).into_object()) + } + + def takequicksample(&self, headrevs: PyObject, + size: usize) -> PyResult<PyObject> { + let mut inner = self.inner(py).borrow_mut(); + let revsvec: Vec<Revision> = rev_pyiter_collect(py, &headrevs)?; + let sample = inner.take_quick_sample(revsvec, size) + .map_err(|e| GraphError::pynew(py, e))?; + let as_vec: Vec<PyObject> = sample + .iter() + .map(|rev| rev.to_py_object(py).into_object()) + .collect(); + Ok(PyTuple::new(py, as_vec.as_slice()).into_object()) + } + }); /// Create the module, with __package__ given from parent
--- a/rust/hg-cpython/src/exceptions.rs Sat Sep 07 14:35:21 2019 +0100 +++ b/rust/hg-cpython/src/exceptions.rs Mon Sep 09 17:26:17 2019 -0400 @@ -67,3 +67,5 @@ } } } + +py_exception!(shared_ref, AlreadyBorrowed, RuntimeError);
--- a/rust/hg-cpython/src/filepatterns.rs Sat Sep 07 14:35:21 2019 +0100
+++ b/rust/hg-cpython/src/filepatterns.rs Mon Sep 09 17:26:17 2019 -0400
@@ -8,7 +8,8 @@
 //! Bindings for the `hg::filepatterns` module provided by the
 //! `hg-core` crate. From Python, this will be seen as `rustext.filepatterns`
-//! and can be used as replacement for the the pure `filepatterns` Python module.
+//! and can be used as replacement for the pure `filepatterns` Python
+//! module.
 //!
 use crate::exceptions::{PatternError, PatternFileError};
 use cpython::{
--- a/rust/hg-cpython/src/lib.rs Sat Sep 07 14:35:21 2019 +0100 +++ b/rust/hg-cpython/src/lib.rs Mon Sep 09 17:26:17 2019 -0400 @@ -27,11 +27,14 @@ pub mod ancestors; mod cindex; mod conversion; +#[macro_use] +pub mod ref_sharing; pub mod dagops; pub mod dirstate; pub mod discovery; pub mod exceptions; pub mod filepatterns; +pub mod parsers; py_module_initializer!(rustext, initrustext, PyInit_rustext, |py, m| { m.add( @@ -50,6 +53,11 @@ "filepatterns", filepatterns::init_module(py, &dotted_name)?, )?; + m.add( + py, + "parsers", + parsers::init_parsers_module(py, &dotted_name)?, + )?; m.add(py, "GraphError", py.get_type::<exceptions::GraphError>())?; m.add( py,
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/rust/hg-cpython/src/parsers.rs Mon Sep 09 17:26:17 2019 -0400 @@ -0,0 +1,208 @@ +// parsers.rs +// +// Copyright 2019 Raphaël Gomès <rgomes@octobus.net> +// +// This software may be used and distributed according to the terms of the +// GNU General Public License version 2 or any later version. + +//! Bindings for the `hg::dirstate::parsers` module provided by the +//! `hg-core` package. +//! +//! From Python, this will be seen as `mercurial.rustext.parsers` +//! +use cpython::{ + exc, PyBytes, PyDict, PyErr, PyInt, PyModule, PyResult, PyTuple, Python, + PythonObject, ToPyObject, +}; +use hg::{ + pack_dirstate, parse_dirstate, DirstateEntry, DirstatePackError, + DirstateParents, DirstateParseError, PARENT_SIZE, +}; +use std::collections::HashMap; +use std::convert::TryInto; + +use libc::c_char; + +use crate::dirstate::{decapsule_make_dirstate_tuple, extract_dirstate}; +use std::time::Duration; + +fn parse_dirstate_wrapper( + py: Python, + dmap: PyDict, + copymap: PyDict, + st: PyBytes, +) -> PyResult<PyTuple> { + let mut dirstate_map = HashMap::new(); + let mut copies = HashMap::new(); + + match parse_dirstate(&mut dirstate_map, &mut copies, st.data(py)) { + Ok(parents) => { + for (filename, entry) in dirstate_map { + // Explicitly go through u8 first, then cast to + // platform-specific `c_char` because Into<u8> has a specific + // implementation while `as c_char` would just do a naive enum + // cast. + let state: u8 = entry.state.into(); + + dmap.set_item( + py, + PyBytes::new(py, &filename), + decapsule_make_dirstate_tuple(py)?( + state as c_char, + entry.mode, + entry.size, + entry.mtime, + ), + )?; + } + for (path, copy_path) in copies { + copymap.set_item( + py, + PyBytes::new(py, &path), + PyBytes::new(py, ©_path), + )?; + } + Ok( + (PyBytes::new(py, &parents.p1), PyBytes::new(py, &parents.p2)) + .to_py_object(py), + ) + } + Err(e) => Err(PyErr::new::<exc::ValueError, _>( + py, + match e { + DirstateParseError::TooLittleData => { + "too little data for parents".to_string() + } + DirstateParseError::Overflow => { + "overflow in dirstate".to_string() + } + DirstateParseError::CorruptedEntry(e) => e, + DirstateParseError::Damaged => { + "dirstate appears to be damaged".to_string() + } + }, + )), + } +} + +fn pack_dirstate_wrapper( + py: Python, + dmap: PyDict, + copymap: PyDict, + pl: PyTuple, + now: PyInt, +) -> PyResult<PyBytes> { + let p1 = pl.get_item(py, 0).extract::<PyBytes>(py)?; + let p1: &[u8] = p1.data(py); + let p2 = pl.get_item(py, 1).extract::<PyBytes>(py)?; + let p2: &[u8] = p2.data(py); + + let mut dirstate_map = extract_dirstate(py, &dmap)?; + + let copies: Result<HashMap<Vec<u8>, Vec<u8>>, PyErr> = copymap + .items(py) + .iter() + .map(|(key, value)| { + Ok(( + key.extract::<PyBytes>(py)?.data(py).to_owned(), + value.extract::<PyBytes>(py)?.data(py).to_owned(), + )) + }) + .collect(); + + if p1.len() != PARENT_SIZE || p2.len() != PARENT_SIZE { + return Err(PyErr::new::<exc::ValueError, _>( + py, + "expected a 20-byte hash".to_string(), + )); + } + + match pack_dirstate( + &mut dirstate_map, + &copies?, + DirstateParents { + p1: p1.try_into().unwrap(), + p2: p2.try_into().unwrap(), + }, + Duration::from_secs(now.as_object().extract::<u64>(py)?), + ) { + Ok(packed) => { + for ( + filename, + DirstateEntry { + state, + mode, + size, + mtime, + }, + ) in dirstate_map + { + // Explicitly go through u8 first, then cast to + // platform-specific `c_char` because Into<u8> has a specific + // implementation while `as c_char` 
would just do a naive enum + // cast. + let state: u8 = state.into(); + dmap.set_item( + py, + PyBytes::new(py, &filename[..]), + decapsule_make_dirstate_tuple(py)?( + state as c_char, + mode, + size, + mtime, + ), + )?; + } + Ok(PyBytes::new(py, &packed)) + } + Err(error) => Err(PyErr::new::<exc::ValueError, _>( + py, + match error { + DirstatePackError::CorruptedParent => { + "expected a 20-byte hash".to_string() + } + DirstatePackError::CorruptedEntry(e) => e, + DirstatePackError::BadSize(expected, actual) => { + format!("bad dirstate size: {} != {}", actual, expected) + } + }, + )), + } +} + +/// Create the module, with `__package__` given from parent +pub fn init_parsers_module(py: Python, package: &str) -> PyResult<PyModule> { + let dotted_name = &format!("{}.parsers", package); + let m = PyModule::new(py, dotted_name)?; + + m.add(py, "__package__", package)?; + m.add(py, "__doc__", "Parsers - Rust implementation")?; + + m.add( + py, + "parse_dirstate", + py_fn!( + py, + parse_dirstate_wrapper(dmap: PyDict, copymap: PyDict, st: PyBytes) + ), + )?; + m.add( + py, + "pack_dirstate", + py_fn!( + py, + pack_dirstate_wrapper( + dmap: PyDict, + copymap: PyDict, + pl: PyTuple, + now: PyInt + ) + ), + )?; + + let sys = PyModule::import(py, "sys")?; + let sys_modules: PyDict = sys.get(py, "modules")?.extract(py)?; + sys_modules.set_item(py, dotted_name, &m)?; + + Ok(m) +}
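
The wrapper drives hg-core's `pack_dirstate` with its new signature: a
mutable state map, a copy map, the parents, and a `Duration` timestamp. A
hedged Rust-side sketch with placeholder inputs (empty maps, null parents)::

    use std::collections::HashMap;
    use std::time::Duration;

    fn pack_empty_dirstate() -> Result<Vec<u8>, DirstatePackError> {
        let mut dirstate_map = HashMap::new();
        let copies = HashMap::new();
        let parents = DirstateParents {
            p1: [0u8; PARENT_SIZE], // placeholder null revision hashes
            p2: [0u8; PARENT_SIZE],
        };
        pack_dirstate(&mut dirstate_map, &copies, parents, Duration::from_secs(0))
    }
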
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-cpython/src/ref_sharing.rs Mon Sep 09 17:26:17 2019 -0400
@@ -0,0 +1,392 @@
+// ref_sharing.rs
+//
+// Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.

+//! Macros for use in the `hg-cpython` bridge library.

+use crate::exceptions::AlreadyBorrowed;
+use cpython::{PyResult, Python};
+use std::cell::{Cell, Ref, RefCell, RefMut};

+/// Manages the shared state between Python and Rust
+#[derive(Default)]
+pub struct PySharedState {
+    leak_count: Cell<usize>,
+    mutably_borrowed: Cell<bool>,
+}

+impl PySharedState {
+    pub fn borrow_mut<'a, T>(
+        &'a self,
+        py: Python<'a>,
+        pyrefmut: RefMut<'a, T>,
+    ) -> PyResult<PyRefMut<'a, T>> {
+        if self.mutably_borrowed.get() {
+            return Err(AlreadyBorrowed::new(
+                py,
+                "Cannot borrow mutably while there exists another \
+                 mutable reference in a Python object",
+            ));
+        }
+        match self.leak_count.get() {
+            0 => {
+                self.mutably_borrowed.replace(true);
+                Ok(PyRefMut::new(py, pyrefmut, self))
+            }
+            // TODO
+            // For now, this works differently than Python references
+            // in the case of iterators.
+            // Python does not complain when the data an iterator
+            // points to is modified if the iterator is never used
+            // afterwards.
+            // Here, we are stricter than this by refusing to give a
+            // mutable reference if it is already borrowed.
+            // While the additional safety might be argued for, it
+            // breaks valid programming patterns in Python and we need
+            // to fix this issue down the line.
+            _ => Err(AlreadyBorrowed::new(
+                py,
+                "Cannot borrow mutably while there are \
+                 immutable references in Python objects",
+            )),
+        }
+    }

+    /// Return a reference to the wrapped data with an artificial static
+    /// lifetime.
+    /// We need to be protected by the GIL for thread-safety.
+    ///
+    /// # Safety
+    ///
+    /// This is highly unsafe since the lifetime of the given data can be
+    /// extended. Do not call this function directly.
+    pub unsafe fn leak_immutable<T>(
+        &self,
+        py: Python,
+        data: &PySharedRefCell<T>,
+    ) -> PyResult<&'static T> {
+        if self.mutably_borrowed.get() {
+            return Err(AlreadyBorrowed::new(
+                py,
+                "Cannot borrow immutably while there is a \
+                 mutable reference in Python objects",
+            ));
+        }
+        let ptr = data.as_ptr();
+        self.leak_count.replace(self.leak_count.get() + 1);
+        Ok(&*ptr)
+    }

+    /// # Safety
+    ///
+    /// It's unsafe to update the reference count without knowing the
+    /// reference is deleted. Do not call this function directly.
+    pub unsafe fn decrease_leak_count(&self, _py: Python, mutable: bool) {
+        self.leak_count
+            .replace(self.leak_count.get().saturating_sub(1));
+        if mutable {
+            self.mutably_borrowed.replace(false);
+        }
+    }
+}

+/// `RefCell` wrapper to be safely used in conjunction with `PySharedState`.
+///
+/// Only immutable operations are allowed through this interface.
+#[derive(Debug)]
+pub struct PySharedRefCell<T> {
+    inner: RefCell<T>,
+}

+impl<T> PySharedRefCell<T> {
+    pub const fn new(value: T) -> PySharedRefCell<T> {
+        Self {
+            inner: RefCell::new(value),
+        }
+    }

+    pub fn borrow(&self) -> Ref<T> {
+        // py_shared_state isn't involved since
+        // - inner.borrow() would fail if self is mutably borrowed,
+        // - and inner.borrow_mut() would fail while self is borrowed.
+ self.inner.borrow() + } + + pub fn as_ptr(&self) -> *mut T { + self.inner.as_ptr() + } + + pub unsafe fn borrow_mut(&self) -> RefMut<T> { + // must be borrowed by self.py_shared_state(py).borrow_mut(). + self.inner.borrow_mut() + } +} + +/// Holds a mutable reference to data shared between Python and Rust. +pub struct PyRefMut<'a, T> { + inner: RefMut<'a, T>, + py_shared_state: &'a PySharedState, +} + +impl<'a, T> PyRefMut<'a, T> { + // Must be constructed by PySharedState after checking its leak_count. + // Otherwise, drop() would incorrectly update the state. + fn new( + _py: Python<'a>, + inner: RefMut<'a, T>, + py_shared_state: &'a PySharedState, + ) -> Self { + Self { + inner, + py_shared_state, + } + } +} + +impl<'a, T> std::ops::Deref for PyRefMut<'a, T> { + type Target = RefMut<'a, T>; + + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl<'a, T> std::ops::DerefMut for PyRefMut<'a, T> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} + +impl<'a, T> Drop for PyRefMut<'a, T> { + fn drop(&mut self) { + let gil = Python::acquire_gil(); + let py = gil.python(); + unsafe { + self.py_shared_state.decrease_leak_count(py, true); + } + } +} + +/// Allows a `py_class!` generated struct to share references to one of its +/// data members with Python. +/// +/// # Warning +/// +/// The targeted `py_class!` needs to have the +/// `data py_shared_state: PySharedState;` data attribute to compile. +/// A better, more complicated macro is needed to automatically insert it, +/// but this one is not yet really battle tested (what happens when +/// multiple references are needed?). See the example below. +/// +/// TODO allow Python container types: for now, integration with the garbage +/// collector does not extend to Rust structs holding references to Python +/// objects. Should the need surface, `__traverse__` and `__clear__` will +/// need to be written as per the `rust-cpython` docs on GC integration. +/// +/// # Parameters +/// +/// * `$name` is the same identifier used in for `py_class!` macro call. +/// * `$inner_struct` is the identifier of the underlying Rust struct +/// * `$data_member` is the identifier of the data member of `$inner_struct` +/// that will be shared. +/// * `$leaked` is the identifier to give to the struct that will manage +/// references to `$name`, to be used for example in other macros like +/// `py_shared_iterator`. +/// +/// # Example +/// +/// ``` +/// struct MyStruct { +/// inner: Vec<u32>; +/// } +/// +/// py_class!(pub class MyType |py| { +/// data inner: PySharedRefCell<MyStruct>; +/// data py_shared_state: PySharedState; +/// }); +/// +/// py_shared_ref!(MyType, MyStruct, inner, MyTypeLeakedRef); +/// ``` +macro_rules! py_shared_ref { + ( + $name: ident, + $inner_struct: ident, + $data_member: ident, + $leaked: ident, + ) => { + impl $name { + fn borrow_mut<'a>( + &'a self, + py: Python<'a>, + ) -> PyResult<crate::ref_sharing::PyRefMut<'a, $inner_struct>> + { + // assert $data_member type + use crate::ref_sharing::PySharedRefCell; + let data: &PySharedRefCell<_> = self.$data_member(py); + self.py_shared_state(py) + .borrow_mut(py, unsafe { data.borrow_mut() }) + } + + /// Returns a leaked reference and its management object. + /// + /// # Safety + /// + /// It's up to you to make sure that the management object lives + /// longer than the leaked reference. Otherwise, you'll get a + /// dangling reference. 
+            unsafe fn leak_immutable<'a>(
+                &'a self,
+                py: Python<'a>,
+            ) -> PyResult<($leaked, &'static $inner_struct)> {
+                // assert $data_member type
+                use crate::ref_sharing::PySharedRefCell;
+                let data: &PySharedRefCell<_> = self.$data_member(py);
+                let static_ref =
+                    self.py_shared_state(py).leak_immutable(py, data)?;
+                let leak_handle = $leaked::new(py, self);
+                Ok((leak_handle, static_ref))
+            }
+        }

+        /// Manage immutable references to `$name` leaked into Python
+        /// iterators.
+        ///
+        /// In truth, this does not represent leaked references themselves;
+        /// it is instead useful alongside them to manage them.
+        pub struct $leaked {
+            inner: $name,
+        }

+        impl $leaked {
+            // Marked as unsafe so client code wouldn't construct $leaked
+            // struct by mistake. Its drop() is unsafe.
+            unsafe fn new(py: Python, inner: &$name) -> Self {
+                Self {
+                    inner: inner.clone_ref(py),
+                }
+            }
+        }

+        impl Drop for $leaked {
+            fn drop(&mut self) {
+                let gil = Python::acquire_gil();
+                let py = gil.python();
+                let state = self.inner.py_shared_state(py);
+                unsafe {
+                    state.decrease_leak_count(py, false);
+                }
+            }
+        }
+    };
+}

+/// Defines a `py_class!` that acts as a Python iterator over a Rust iterator.
+///
+/// TODO: this is a bit awkward to use, and a better (more complicated)
+/// procedural macro would simplify the interface a lot.
+///
+/// # Parameters
+///
+/// * `$name` is the identifier to give to the resulting Rust struct.
+/// * `$leaked` corresponds to `$leaked` in the matching `py_shared_ref!` call.
+/// * `$iterator_type` is the type of the Rust iterator.
+/// * `$success_func` is a function for processing the Rust `(key, value)`
+/// tuple on iteration success, turning it into something Python understands.
+/// * `$success_type` is the return type of `$success_func`
+///
+/// # Example
+///
+/// ```
+/// struct MyStruct {
+///     inner: HashMap<Vec<u8>, Vec<u8>>,
+/// }
+///
+/// py_class!(pub class MyType |py| {
+///     data inner: PySharedRefCell<MyStruct>;
+///     data py_shared_state: PySharedState;
+///
+///     def __iter__(&self) -> PyResult<MyTypeItemsIterator> {
+///         let (leak_handle, leaked_ref) = unsafe { self.leak_immutable(py)? };
+///         MyTypeItemsIterator::from_inner(
+///             py,
+///             leak_handle,
+///             leaked_ref.iter(),
+///         )
+///     }
+/// });
+///
+/// impl MyType {
+///     fn translate_key_value(
+///         py: Python,
+///         res: (&Vec<u8>, &Vec<u8>),
+///     ) -> PyResult<Option<(PyBytes, PyBytes)>> {
+///         let (f, entry) = res;
+///         Ok(Some((
+///             PyBytes::new(py, f),
+///             PyBytes::new(py, entry),
+///         )))
+///     }
+/// }
+///
+/// py_shared_ref!(MyType, MyStruct, inner, MyTypeLeakedRef);
+///
+/// py_shared_iterator!(
+///     MyTypeItemsIterator,
+///     MyTypeLeakedRef,
+///     std::collections::hash_map::Iter<'static, Vec<u8>, Vec<u8>>,
+///     MyType::translate_key_value,
+///     Option<(PyBytes, PyBytes)>
+/// );
+/// ```
+macro_rules!
py_shared_iterator { + ( + $name: ident, + $leaked: ident, + $iterator_type: ty, + $success_func: expr, + $success_type: ty + ) => { + py_class!(pub class $name |py| { + data inner: RefCell<Option<$leaked>>; + data it: RefCell<$iterator_type>; + + def __next__(&self) -> PyResult<$success_type> { + let mut inner_opt = self.inner(py).borrow_mut(); + if inner_opt.is_some() { + match self.it(py).borrow_mut().next() { + None => { + // replace Some(inner) by None, drop $leaked + inner_opt.take(); + Ok(None) + } + Some(res) => { + $success_func(py, res) + } + } + } else { + Ok(None) + } + } + + def __iter__(&self) -> PyResult<Self> { + Ok(self.clone_ref(py)) + } + }); + + impl $name { + pub fn from_inner( + py: Python, + leaked: $leaked, + it: $iterator_type + ) -> PyResult<Self> { + Self::create_instance( + py, + RefCell::new(Some(leaked)), + RefCell::new(it) + ) + } + } + }; +}
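
The leak count extends, across the Python boundary, the same
exclusive-or-shared rule that `RefCell` enforces within Rust. A
standard-library analogue of the invariant being protected::

    use std::cell::RefCell;

    fn main() {
        let cell = RefCell::new(vec![1u32, 2, 3]);
        // Comparable to a reference leaked into a Python iterator:
        let shared = cell.borrow();
        // While it lives, mutable access must be refused (PySharedState
        // raises AlreadyBorrowed at the equivalent point).
        assert!(cell.try_borrow_mut().is_err());
        drop(shared); // iterator exhausted or dropped
        assert!(cell.try_borrow_mut().is_ok());
    }
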
--- a/rust/hg-direct-ffi/src/ancestors.rs Sat Sep 07 14:35:21 2019 +0100 +++ b/rust/hg-direct-ffi/src/ancestors.rs Mon Sep 09 17:26:17 2019 -0400 @@ -36,9 +36,7 @@ impl Index { pub fn new(index: IndexPtr) -> Self { - Index { - index: index, - } + Index { index: index } } } @@ -46,8 +44,13 @@ /// wrap a call to the C extern parents function fn parents(&self, rev: Revision) -> Result<[Revision; 2], GraphError> { let mut res: [c_int; 2] = [0; 2]; - let code = - unsafe { HgRevlogIndex_GetParents(self.index, rev, &mut res as *mut [c_int; 2]) }; + let code = unsafe { + HgRevlogIndex_GetParents( + self.index, + rev, + &mut res as *mut [c_int; 2], + ) + }; match code { 0 => Ok(res), _ => Err(GraphError::ParentOutOfRange(rev)), @@ -98,22 +101,26 @@ let slice = slice::from_raw_parts(initrevs, initrevslen); - Box::into_raw(Box::new(match AncestorsIterator::new( - graph, - slice.into_iter().map(|&r| r as Revision), - stoprev as Revision, - inclb, - ) { - Ok(it) => it, - Err(_) => { - return null_mut(); - } - })) + Box::into_raw(Box::new( + match AncestorsIterator::new( + graph, + slice.into_iter().map(|&r| r as Revision), + stoprev as Revision, + inclb, + ) { + Ok(it) => it, + Err(_) => { + return null_mut(); + } + }, + )) } /// Deallocator to be called from C code #[no_mangle] -pub extern "C" fn rustlazyancestors_drop(raw_iter: *mut AncestorsIterator<Index>) { +pub extern "C" fn rustlazyancestors_drop( + raw_iter: *mut AncestorsIterator<Index>, +) { raw_drop(raw_iter); } @@ -131,7 +138,9 @@ /// it will be up to the C wrapper to convert that back into a Python end of /// iteration #[no_mangle] -pub extern "C" fn rustlazyancestors_next(raw: *mut AncestorsIterator<Index>) -> c_long { +pub extern "C" fn rustlazyancestors_next( + raw: *mut AncestorsIterator<Index>, +) -> c_long { raw_next(raw) } @@ -227,7 +236,9 @@ let mut initrevs: Vec<c_long> = vec![11, 13]; let initrevs_len = initrevs.len(); let initrevs_ptr = initrevs.as_mut_ptr() as usize; - let handler = thread::spawn(move || stub_raw_init(initrevs_len, initrevs_ptr, 0, 1)); + let handler = thread::spawn(move || { + stub_raw_init(initrevs_len, initrevs_ptr, 0, 1) + }); let raw = handler.join().unwrap() as *mut AncestorsIterator<Stub>; assert_eq!(raw_next(raw), 13);
--- a/setup.py Sat Sep 07 14:35:21 2019 +0100 +++ b/setup.py Mon Sep 09 17:26:17 2019 -0400 @@ -1067,6 +1067,7 @@ 'mercurial.cext', 'mercurial.cffi', 'mercurial.hgweb', + 'mercurial.interfaces', 'mercurial.pure', 'mercurial.thirdparty', 'mercurial.thirdparty.attr', @@ -1078,8 +1079,8 @@ 'hgext', 'hgext.convert', 'hgext.fsmonitor', 'hgext.fastannotate', 'hgext.fsmonitor.pywatchman', + 'hgext.highlight', 'hgext.infinitepush', - 'hgext.highlight', 'hgext.largefiles', 'hgext.lfs', 'hgext.narrow', 'hgext.remotefilelog', 'hgext.zeroconf', 'hgext3rd',
--- a/tests/fakedirstatewritetime.py Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/fakedirstatewritetime.py Mon Sep 09 17:26:17 2019 -0400 @@ -30,6 +30,7 @@ ) parsers = policy.importmod(r'parsers') +rustmod = policy.importrust(r'parsers') def pack_dirstate(fakenow, orig, dmap, copymap, pl, now): # execute what original parsers.pack_dirstate should do actually @@ -57,16 +58,21 @@ # 'fakenow' value and 'touch -t YYYYmmddHHMM' argument easy fakenow = dateutil.parsedate(fakenow, [b'%Y%m%d%H%M'])[0] - if rustext is not None: - orig_module = rustext.dirstate - orig_pack_dirstate = rustext.dirstate.pack_dirstate - else: - orig_module = parsers - orig_pack_dirstate = parsers.pack_dirstate + if rustmod is not None: + # The Rust implementation does not use public parse/pack dirstate + # to prevent conversion round-trips + orig_dirstatemap_write = dirstate.dirstatemap.write + wrapper = lambda self, st, now: orig_dirstatemap_write(self, + st, + fakenow) + dirstate.dirstatemap.write = wrapper orig_dirstate_getfsnow = dirstate._getfsnow wrapper = lambda *args: pack_dirstate(fakenow, orig_pack_dirstate, *args) + orig_module = parsers + orig_pack_dirstate = parsers.pack_dirstate + orig_module.pack_dirstate = wrapper dirstate._getfsnow = lambda *args: fakenow try: @@ -74,6 +80,8 @@ finally: orig_module.pack_dirstate = orig_pack_dirstate dirstate._getfsnow = orig_dirstate_getfsnow + if rustmod is not None: + dirstate.dirstatemap.write = orig_dirstatemap_write def _poststatusfixup(orig, workingctx, status, fixup): ui = workingctx.repo().ui
--- a/tests/flagprocessorext.py Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/flagprocessorext.py Mon Sep 09 17:26:17 2019 -0400 @@ -12,6 +12,9 @@ revlog, util, ) +from mercurial.revlogutils import ( + flagutil, +) # Test only: These flags are defined here only in the context of testing the # behavior of the flag processor. The canonical way to add flags is to get in @@ -58,7 +61,7 @@ class wrappedfile(obj.__class__): def addrevision(self, text, transaction, link, p1, p2, cachedelta=None, node=None, - flags=revlog.REVIDX_DEFAULT_FLAGS): + flags=flagutil.REVIDX_DEFAULT_FLAGS): if b'[NOOP]' in text: flags |= REVIDX_NOOP @@ -102,7 +105,7 @@ # Teach revlog about our test flags flags = [REVIDX_NOOP, REVIDX_BASE64, REVIDX_GZIP, REVIDX_FAIL] - revlog.REVIDX_KNOWN_FLAGS |= util.bitsfrom(flags) + flagutil.REVIDX_KNOWN_FLAGS |= util.bitsfrom(flags) revlog.REVIDX_FLAGS_ORDER.extend(flags) # Teach exchange to use changegroup 3 @@ -110,7 +113,7 @@ exchange._bundlespeccontentopts[k][b"cg.version"] = b"03" # Register flag processors for each extension - revlog.addflagprocessor( + flagutil.addflagprocessor( REVIDX_NOOP, ( noopdonothing, @@ -118,7 +121,7 @@ validatehash, ) ) - revlog.addflagprocessor( + flagutil.addflagprocessor( REVIDX_BASE64, ( b64decode, @@ -126,7 +129,7 @@ bypass, ), ) - revlog.addflagprocessor( + flagutil.addflagprocessor( REVIDX_GZIP, ( gzipdecompress,
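The processor tuple shape is unchanged by the move from `revlog` to `mercurial.revlogutils.flagutil`; only the registration point differs. A minimal sketch mirroring the no-op processor above (the flag value is illustrative, and the flag must be marked known first, as the extension does)::

    from mercurial import util
    from mercurial.revlogutils import flagutil

    REVIDX_NOOP = 1 << 3  # illustrative test-only flag value

    def noopread(self, text):
        return text, True   # (possibly transformed text, validate hash?)

    def noopwrite(self, text):
        return text, True

    def nooprawcheck(self, text):
        return True         # raw form needs no transformation

    flagutil.REVIDX_KNOWN_FLAGS |= util.bitsfrom([REVIDX_NOOP])
    flagutil.addflagprocessor(REVIDX_NOOP,
                              (noopread, noopwrite, nooprawcheck))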
--- a/tests/notcapable	Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/notcapable	Mon Sep 09 17:26:17 2019 -0400
@@ -6,7 +6,8 @@
 fi
 
 cat > notcapable-$CAP.py << EOF
-from mercurial import extensions, localrepo, repository
+from mercurial import extensions, localrepo
+from mercurial.interfaces import repository
 def extsetup(ui):
     extensions.wrapfunction(repository.peer, 'capable', wrapcapable)
     extensions.wrapfunction(localrepo.localrepository, 'peer', wrappeer)
--- a/tests/pullext.py	Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/pullext.py	Mon Sep 09 17:26:17 2019 -0400
@@ -13,6 +13,8 @@
     error,
     extensions,
     localrepo,
+)
+from mercurial.interfaces import (
     repository,
 )
--- a/tests/run-tests.py Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/run-tests.py Mon Sep 09 17:26:17 2019 -0400 @@ -484,15 +484,9 @@ if 'java' in sys.platform or '__pypy__' in sys.modules: options.pure = True - if options.with_hg: - options.with_hg = canonpath(_bytespath(options.with_hg)) - if not (os.path.isfile(options.with_hg) and - os.access(options.with_hg, os.X_OK)): - parser.error('--with-hg must specify an executable hg script') - if os.path.basename(options.with_hg) not in [b'hg', b'hg.exe']: - sys.stderr.write('warning: --with-hg should specify an hg script\n') - sys.stderr.flush() if options.local: + if options.with_hg or options.with_chg: + parser.error('--local cannot be used with --with-hg or --with-chg') testdir = os.path.dirname(_bytespath(canonpath(sys.argv[0]))) reporootdir = os.path.dirname(testdir) pathandattrs = [(b'hg', 'with_hg')] @@ -503,7 +497,16 @@ if os.name != 'nt' and not os.access(binpath, os.X_OK): parser.error('--local specified, but %r not found or ' 'not executable' % binpath) - setattr(options, attr, binpath) + setattr(options, attr, _strpath(binpath)) + + if options.with_hg: + options.with_hg = canonpath(_bytespath(options.with_hg)) + if not (os.path.isfile(options.with_hg) and + os.access(options.with_hg, os.X_OK)): + parser.error('--with-hg must specify an executable hg script') + if os.path.basename(options.with_hg) not in [b'hg', b'hg.exe']: + sys.stderr.write('warning: --with-hg should specify an hg script\n') + sys.stderr.flush() if (options.chg or options.with_chg) and os.name == 'nt': parser.error('chg does not work on %s' % os.name) @@ -1299,6 +1302,15 @@ if PYTHON3: bchr = lambda x: bytes([x]) +WARN_UNDEFINED = 1 +WARN_YES = 2 +WARN_NO = 3 + +MARK_OPTIONAL = b" (?)\n" + +def isoptional(line): + return line.endswith(MARK_OPTIONAL) + class TTest(Test): """A "t test" is a test backed by a .t file.""" @@ -1598,114 +1610,127 @@ def _processoutput(self, exitcode, output, salt, after, expected): # Merge the script output back into a unified test. - warnonly = 1 # 1: not yet; 2: yes; 3: for sure not + warnonly = WARN_UNDEFINED # 1: not yet; 2: yes; 3: for sure not if exitcode != 0: - warnonly = 3 + warnonly = WARN_NO pos = -1 postout = [] - for l in output: - lout, lcmd = l, None - if salt in l: - lout, lcmd = l.split(salt, 1) - - while lout: - if not lout.endswith(b'\n'): - lout += b' (no-eol)\n' - - # Find the expected output at the current position. - els = [None] - if expected.get(pos, None): - els = expected[pos] - - optional = [] - for i, el in enumerate(els): - r = False - if el: - r, exact = self.linematch(el, lout) - if isinstance(r, str): - if r == '-glob': - lout = ''.join(el.rsplit(' (glob)', 1)) - r = '' # Warn only this line. - elif r == "retry": - postout.append(b' ' + el) - else: - log('\ninfo, unknown linematch result: %r\n' % r) - r = False - if r: - els.pop(i) - break - if el: - if el.endswith(b" (?)\n"): - optional.append(i) - else: - m = optline.match(el) - if m: - conditions = [ - c for c in m.group(2).split(b' ')] - - if not self._iftest(conditions): - optional.append(i) - if exact: - # Don't allow line to be matches against a later - # line in the output - els.pop(i) - break - - if r: - if r == "retry": - continue - # clean up any optional leftovers - for i in optional: - postout.append(b' ' + els[i]) - for i in reversed(optional): - del els[i] - postout.append(b' ' + el) - else: - if self.NEEDESCAPE(lout): - lout = TTest._stringescape(b'%s (esc)\n' % - lout.rstrip(b'\n')) - postout.append(b' ' + lout) # Let diff deal with it. 
- if r != '': # If line failed. - warnonly = 3 # for sure not - elif warnonly == 1: # Is "not yet" and line is warn only. - warnonly = 2 # Yes do warn. - break - else: - # clean up any optional leftovers - while expected.get(pos, None): - el = expected[pos].pop(0) - if el: - if not el.endswith(b" (?)\n"): - m = optline.match(el) - if m: - conditions = [c for c in m.group(2).split(b' ')] - - if self._iftest(conditions): - # Don't append as optional line - continue - else: - continue - postout.append(b' ' + el) - - if lcmd: - # Add on last return code. - ret = int(lcmd.split()[1]) - if ret != 0: - postout.append(b' [%d]\n' % ret) - if pos in after: - # Merge in non-active test bits. - postout += after.pop(pos) - pos = int(lcmd.split()[0]) + for out_rawline in output: + out_line, cmd_line = out_rawline, None + if salt in out_rawline: + out_line, cmd_line = out_rawline.split(salt, 1) + + pos, postout, warnonly = self._process_out_line(out_line, + pos, + postout, + expected, + warnonly) + pos, postout = self._process_cmd_line(cmd_line, pos, postout, + after) if pos in after: postout += after.pop(pos) - if warnonly == 2: + if warnonly == WARN_YES: exitcode = False # Set exitcode to warned. return exitcode, postout + def _process_out_line(self, out_line, pos, postout, expected, warnonly): + while out_line: + if not out_line.endswith(b'\n'): + out_line += b' (no-eol)\n' + + # Find the expected output at the current position. + els = [None] + if expected.get(pos, None): + els = expected[pos] + + optional = [] + for i, el in enumerate(els): + r = False + if el: + r, exact = self.linematch(el, out_line) + if isinstance(r, str): + if r == '-glob': + out_line = ''.join(el.rsplit(' (glob)', 1)) + r = '' # Warn only this line. + elif r == "retry": + postout.append(b' ' + el) + else: + log('\ninfo, unknown linematch result: %r\n' % r) + r = False + if r: + els.pop(i) + break + if el: + if isoptional(el): + optional.append(i) + else: + m = optline.match(el) + if m: + conditions = [ + c for c in m.group(2).split(b' ')] + + if not self._iftest(conditions): + optional.append(i) + if exact: + # Don't allow line to be matches against a later + # line in the output + els.pop(i) + break + + if r: + if r == "retry": + continue + # clean up any optional leftovers + for i in optional: + postout.append(b' ' + els[i]) + for i in reversed(optional): + del els[i] + postout.append(b' ' + el) + else: + if self.NEEDESCAPE(out_line): + out_line = TTest._stringescape(b'%s (esc)\n' % + out_line.rstrip(b'\n')) + postout.append(b' ' + out_line) # Let diff deal with it. + if r != '': # If line failed. + warnonly = WARN_NO + elif warnonly == WARN_UNDEFINED: + warnonly = WARN_YES + break + else: + # clean up any optional leftovers + while expected.get(pos, None): + el = expected[pos].pop(0) + if el: + if not isoptional(el): + m = optline.match(el) + if m: + conditions = [c for c in m.group(2).split(b' ')] + + if self._iftest(conditions): + # Don't append as optional line + continue + else: + continue + postout.append(b' ' + el) + return pos, postout, warnonly + + def _process_cmd_line(self, cmd_line, pos, postout, after): + """process a "command" part of a line from unified test output""" + if cmd_line: + # Add on last return code. + ret = int(cmd_line.split()[1]) + if ret != 0: + postout.append(b' [%d]\n' % ret) + if pos in after: + # Merge in non-active test bits. 
+ postout += after.pop(pos) + pos = int(cmd_line.split()[0]) + return pos, postout + @staticmethod def rematch(el, l): try: @@ -1753,9 +1778,9 @@ if el == l: # perfect match (fast) return True, True retry = False - if el.endswith(b" (?)\n"): + if isoptional(el): retry = "retry" - el = el[:-5] + b"\n" + el = el[:-len(MARK_OPTIONAL)] + b"\n" else: m = optline.match(el) if m:
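Beyond splitting `_processoutput` into `_process_out_line` and `_process_cmd_line`, the refactor names the magic values the old code manipulated inline. In isolation, the optional-line marker handling works like this::

    MARK_OPTIONAL = b" (?)\n"

    def isoptional(line):
        return line.endswith(MARK_OPTIONAL)

    el = b"output that may be absent (?)\n"
    if isoptional(el):
        # strip the marker before matching, as linematch() now does
        el = el[:-len(MARK_OPTIONAL)] + b"\n"
    print(el)  # b'output that may be absent\n'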
--- a/tests/simplestorerepo.py Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/simplestorerepo.py Mon Sep 09 17:26:17 2019 -0400 @@ -32,16 +32,21 @@ localrepo, mdiff, pycompat, - repository, revlog, store, verify, ) +from mercurial.interfaces import ( + repository, + util as interfaceutil, +) from mercurial.utils import ( cborutil, - interfaceutil, storageutil, ) +from mercurial.revlogutils import ( + flagutil, +) # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should @@ -86,7 +91,7 @@ node = attr.ib(default=None) @interfaceutil.implementer(repository.ifilestorage) -class filestorage(object): +class filestorage(flagutil.flagprocessorsmixin): """Implements storage for a tracked path. Data is stored in the VFS in a directory corresponding to the tracked @@ -97,6 +102,8 @@ Fulltext data is stored in files having names of the node. """ + _flagserrorclass = simplestoreerror + def __init__(self, svfs, path): self._svfs = svfs self._path = path @@ -114,6 +121,8 @@ self._index = [] self._refreshindex() + self._flagprocessors = dict(flagutil.flagprocessors) + def _refreshindex(self): self._indexbynode.clear() self._indexbyrev.clear() @@ -258,45 +267,6 @@ return True - def _processflags(self, text, flags, operation, raw=False): - if flags == 0: - return text, True - - if flags & ~revlog.REVIDX_KNOWN_FLAGS: - raise simplestoreerror(_("incompatible revision flag '%#x'") % - (flags & ~revlog.REVIDX_KNOWN_FLAGS)) - - validatehash = True - # Depending on the operation (read or write), the order might be - # reversed due to non-commutative transforms. - orderedflags = revlog.REVIDX_FLAGS_ORDER - if operation == 'write': - orderedflags = reversed(orderedflags) - - for flag in orderedflags: - # If a flagprocessor has been registered for a known flag, apply the - # related operation transform and update result tuple. - if flag & flags: - vhash = True - - if flag not in revlog._flagprocessors: - message = _("missing processor for flag '%#x'") % (flag) - raise simplestoreerror(message) - - processor = revlog._flagprocessors[flag] - if processor is not None: - readtransform, writetransform, rawtransform = processor - - if raw: - vhash = rawtransform(self, text) - elif operation == 'read': - text, vhash = readtransform(self, text) - else: # write operation - text, vhash = writetransform(self, text) - validatehash = validatehash and vhash - - return text, validatehash - def checkhash(self, text, node, p1=None, p2=None, rev=None): if p1 is None and p2 is None: p1, p2 = self.parents(node) @@ -320,12 +290,19 @@ path = b'/'.join([self._storepath, hex(node)]) rawtext = self._svfs.read(path) - text, validatehash = self._processflags(rawtext, flags, 'read', raw=raw) + if raw: + validatehash = self._processflagsraw(rawtext, flags) + text = rawtext + else: + text, validatehash = self._processflagsread(rawtext, flags) if validatehash: self.checkhash(text, node, rev=rev) return text + def rawdata(self, nodeorrev): + return self.revision(raw=True) + def read(self, node): validatenode(node) @@ -479,7 +456,7 @@ if flags: node = node or storageutil.hashrevisionsha1(text, p1, p2) - rawtext, validatehash = self._processflags(text, flags, 'write') + rawtext, validatehash = self._processflagswrite(text, flags) node = node or storageutil.hashrevisionsha1(text, p1, p2)
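With the hand-rolled `_processflags` deleted, the storage class inherits `_processflagsread`, `_processflagswrite`, and `_processflagsraw` from the mixin and only supplies its error class and a per-instance processor table. Reduced to the flag-handling parts, the new shape is::

    from mercurial.revlogutils import flagutil

    class simplestoreerror(Exception):
        pass

    class filestorage(flagutil.flagprocessorsmixin):
        # the mixin raises this class when flag processing fails
        _flagserrorclass = simplestoreerror

        def __init__(self):
            # per-instance copy, so tests can register extra processors
            # without mutating the global table
            self._flagprocessors = dict(flagutil.flagprocessors)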
--- a/tests/test-acl.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-acl.t Mon Sep 09 17:26:17 2019 -0400 @@ -131,12 +131,12 @@ adding foo/Bar/file.txt revisions adding foo/file.txt revisions adding quux/file.py revisions - added 3 changesets with 3 changes to 3 files bundle2-input-part: total payload size 1553 bundle2-input-part: "phase-heads" supported bundle2-input-part: total payload size 24 bundle2-input-bundle: 4 parts total updating the branch cache + added 3 changesets with 3 changes to 3 files bundle2-output-bundle: "HG20", 1 parts total bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload bundle2-input-bundle: no-transaction @@ -196,7 +196,6 @@ adding foo/Bar/file.txt revisions adding foo/file.txt revisions adding quux/file.py revisions - added 3 changesets with 3 changes to 3 files calling hook pretxnchangegroup.acl: hgext.acl.hook acl: changes have source "push" - skipping bundle2-input-part: total payload size 1553 @@ -204,6 +203,7 @@ bundle2-input-part: total payload size 24 bundle2-input-bundle: 4 parts total updating the branch cache + added 3 changesets with 3 changes to 3 files bundle2-output-bundle: "HG20", 1 parts total bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload bundle2-input-bundle: no-transaction @@ -263,7 +263,6 @@ adding foo/Bar/file.txt revisions adding foo/file.txt revisions adding quux/file.py revisions - added 3 changesets with 3 changes to 3 files calling hook pretxnchangegroup.acl: hgext.acl.hook acl: checking access for user "fred" acl: acl.allow.branches not enabled @@ -281,6 +280,7 @@ bundle2-input-part: total payload size 24 bundle2-input-bundle: 4 parts total updating the branch cache + added 3 changesets with 3 changes to 3 files bundle2-output-bundle: "HG20", 1 parts total bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload bundle2-input-bundle: no-transaction @@ -340,7 +340,6 @@ adding foo/Bar/file.txt revisions adding foo/file.txt revisions adding quux/file.py revisions - added 3 changesets with 3 changes to 3 files calling hook pretxnchangegroup.acl: hgext.acl.hook acl: checking access for user "fred" acl: acl.allow.branches not enabled @@ -409,7 +408,6 @@ adding foo/Bar/file.txt revisions adding foo/file.txt revisions adding quux/file.py revisions - added 3 changesets with 3 changes to 3 files calling hook pretxnchangegroup.acl: hgext.acl.hook acl: checking access for user "fred" acl: acl.allow.branches not enabled @@ -483,7 +481,6 @@ adding foo/Bar/file.txt revisions adding foo/file.txt revisions adding quux/file.py revisions - added 3 changesets with 3 changes to 3 files calling hook pretxnchangegroup.acl: hgext.acl.hook acl: checking access for user "barney" acl: acl.allow.branches not enabled @@ -554,7 +551,6 @@ adding foo/Bar/file.txt revisions adding foo/file.txt revisions adding quux/file.py revisions - added 3 changesets with 3 changes to 3 files calling hook pretxnchangegroup.acl: hgext.acl.hook acl: checking access for user "fred" acl: acl.allow.branches not enabled @@ -630,7 +626,6 @@ adding foo/Bar/file.txt revisions adding foo/file.txt revisions adding quux/file.py revisions - added 3 changesets with 3 changes to 3 files calling hook pretxnchangegroup.acl: hgext.acl.hook acl: checking access for user "fred" acl: acl.allow.branches not enabled @@ -703,7 +698,6 @@ adding foo/Bar/file.txt revisions adding foo/file.txt revisions adding quux/file.py revisions - added 3 changesets with 3 changes to 3 files calling hook 
pretxnchangegroup.acl: hgext.acl.hook acl: checking access for user "barney" acl: acl.allow.branches not enabled @@ -775,7 +769,6 @@ adding manifests adding file changes adding foo/file.txt revisions - added 1 changesets with 1 changes to 1 files calling hook pretxnchangegroup.acl: hgext.acl.hook acl: checking access for user "fred" acl: acl.allow.branches not enabled @@ -796,6 +789,7 @@ acl: bookmark access granted: "ef1ea85a6374b77d6da9dcda9541f498f2d17df7" on bookmark "moving-bookmark" bundle2-input-bundle: 6 parts total updating the branch cache + added 1 changesets with 1 changes to 1 files bundle2-output-bundle: "HG20", 1 parts total bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload bundle2-input-bundle: no-transaction @@ -861,7 +855,6 @@ adding manifests adding file changes adding foo/file.txt revisions - added 1 changesets with 1 changes to 1 files calling hook pretxnchangegroup.acl: hgext.acl.hook acl: checking access for user "fred" acl: acl.allow.branches not enabled @@ -950,7 +943,6 @@ adding foo/Bar/file.txt revisions adding foo/file.txt revisions adding quux/file.py revisions - added 3 changesets with 3 changes to 3 files calling hook pretxnchangegroup.acl: hgext.acl.hook acl: checking access for user "barney" acl: acl.allow.branches not enabled @@ -968,6 +960,7 @@ bundle2-input-part: total payload size 24 bundle2-input-bundle: 4 parts total updating the branch cache + added 3 changesets with 3 changes to 3 files bundle2-output-bundle: "HG20", 1 parts total bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload bundle2-input-bundle: no-transaction @@ -1034,7 +1027,6 @@ adding foo/Bar/file.txt revisions adding foo/file.txt revisions adding quux/file.py revisions - added 3 changesets with 3 changes to 3 files calling hook pretxnchangegroup.acl: hgext.acl.hook acl: checking access for user "wilma" acl: acl.allow.branches not enabled @@ -1116,7 +1108,6 @@ adding foo/Bar/file.txt revisions adding foo/file.txt revisions adding quux/file.py revisions - added 3 changesets with 3 changes to 3 files calling hook pretxnchangegroup.acl: hgext.acl.hook acl: checking access for user "barney" error: pretxnchangegroup.acl hook raised an exception: [Errno *] * (glob) @@ -1193,7 +1184,6 @@ adding foo/Bar/file.txt revisions adding foo/file.txt revisions adding quux/file.py revisions - added 3 changesets with 3 changes to 3 files calling hook pretxnchangegroup.acl: hgext.acl.hook acl: checking access for user "betty" acl: acl.allow.branches not enabled @@ -1281,7 +1271,6 @@ adding foo/Bar/file.txt revisions adding foo/file.txt revisions adding quux/file.py revisions - added 3 changesets with 3 changes to 3 files calling hook pretxnchangegroup.acl: hgext.acl.hook acl: checking access for user "barney" acl: acl.allow.branches not enabled @@ -1299,6 +1288,7 @@ bundle2-input-part: total payload size 24 bundle2-input-bundle: 4 parts total updating the branch cache + added 3 changesets with 3 changes to 3 files bundle2-output-bundle: "HG20", 1 parts total bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload bundle2-input-bundle: no-transaction @@ -1369,7 +1359,6 @@ adding foo/Bar/file.txt revisions adding foo/file.txt revisions adding quux/file.py revisions - added 3 changesets with 3 changes to 3 files calling hook pretxnchangegroup.acl: hgext.acl.hook acl: checking access for user "fred" acl: acl.allow.branches not enabled @@ -1387,6 +1376,7 @@ bundle2-input-part: total payload size 24 
bundle2-input-bundle: 4 parts total updating the branch cache + added 3 changesets with 3 changes to 3 files bundle2-output-bundle: "HG20", 1 parts total bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload bundle2-input-bundle: no-transaction @@ -1453,7 +1443,6 @@ adding foo/Bar/file.txt revisions adding foo/file.txt revisions adding quux/file.py revisions - added 3 changesets with 3 changes to 3 files calling hook pretxnchangegroup.acl: hgext.acl.hook acl: checking access for user "fred" acl: acl.allow.branches not enabled @@ -1534,7 +1523,6 @@ adding foo/Bar/file.txt revisions adding foo/file.txt revisions adding quux/file.py revisions - added 3 changesets with 3 changes to 3 files calling hook pretxnchangegroup.acl: hgext.acl.hook acl: checking access for user "fred" acl: acl.allow.branches not enabled @@ -1553,6 +1541,7 @@ bundle2-input-part: total payload size 24 bundle2-input-bundle: 4 parts total updating the branch cache + added 3 changesets with 3 changes to 3 files bundle2-output-bundle: "HG20", 1 parts total bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload bundle2-input-bundle: no-transaction @@ -1619,7 +1608,6 @@ adding foo/Bar/file.txt revisions adding foo/file.txt revisions adding quux/file.py revisions - added 3 changesets with 3 changes to 3 files calling hook pretxnchangegroup.acl: hgext.acl.hook acl: checking access for user "fred" acl: acl.allow.branches not enabled @@ -1743,7 +1731,6 @@ adding foo/Bar/file.txt revisions adding foo/file.txt revisions adding quux/file.py revisions - added 4 changesets with 4 changes to 4 files (+1 heads) calling hook pretxnchangegroup.acl: hgext.acl.hook acl: checking access for user "astro" acl: acl.allow.branches not enabled @@ -1763,6 +1750,7 @@ bundle2-input-part: total payload size 48 bundle2-input-bundle: 4 parts total updating the branch cache + added 4 changesets with 4 changes to 4 files (+1 heads) bundle2-output-bundle: "HG20", 1 parts total bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload bundle2-input-bundle: no-transaction @@ -1829,7 +1817,6 @@ adding foo/Bar/file.txt revisions adding foo/file.txt revisions adding quux/file.py revisions - added 4 changesets with 4 changes to 4 files (+1 heads) calling hook pretxnchangegroup.acl: hgext.acl.hook acl: checking access for user "astro" acl: acl.allow.branches not enabled @@ -1908,7 +1895,6 @@ adding foo/Bar/file.txt revisions adding foo/file.txt revisions adding quux/file.py revisions - added 4 changesets with 4 changes to 4 files (+1 heads) calling hook pretxnchangegroup.acl: hgext.acl.hook acl: checking access for user "astro" acl: acl.allow.branches enabled, 0 entries for user astro @@ -1983,7 +1969,6 @@ adding foo/Bar/file.txt revisions adding foo/file.txt revisions adding quux/file.py revisions - added 4 changesets with 4 changes to 4 files (+1 heads) calling hook pretxnchangegroup.acl: hgext.acl.hook acl: checking access for user "astro" acl: acl.allow.branches enabled, 0 entries for user astro @@ -2052,7 +2037,6 @@ adding foo/Bar/file.txt revisions adding foo/file.txt revisions adding quux/file.py revisions - added 4 changesets with 4 changes to 4 files (+1 heads) calling hook pretxnchangegroup.acl: hgext.acl.hook acl: checking access for user "george" acl: acl.allow.branches enabled, 1 entries for user george @@ -2072,6 +2056,7 @@ bundle2-input-part: total payload size 48 bundle2-input-bundle: 4 parts total updating the branch cache + added 4 changesets with 4 
changes to 4 files (+1 heads) bundle2-output-bundle: "HG20", 1 parts total bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload bundle2-input-bundle: no-transaction @@ -2143,7 +2128,6 @@ adding foo/Bar/file.txt revisions adding foo/file.txt revisions adding quux/file.py revisions - added 4 changesets with 4 changes to 4 files (+1 heads) calling hook pretxnchangegroup.acl: hgext.acl.hook acl: checking access for user "george" acl: acl.allow.branches enabled, 1 entries for user george @@ -2163,6 +2147,7 @@ bundle2-input-part: total payload size 48 bundle2-input-bundle: 4 parts total updating the branch cache + added 4 changesets with 4 changes to 4 files (+1 heads) bundle2-output-bundle: "HG20", 1 parts total bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload bundle2-input-bundle: no-transaction @@ -2233,7 +2218,6 @@ adding foo/Bar/file.txt revisions adding foo/file.txt revisions adding quux/file.py revisions - added 4 changesets with 4 changes to 4 files (+1 heads) calling hook pretxnchangegroup.acl: hgext.acl.hook acl: checking access for user "george" acl: acl.allow.branches not enabled @@ -2307,7 +2291,6 @@ adding foo/Bar/file.txt revisions adding foo/file.txt revisions adding quux/file.py revisions - added 4 changesets with 4 changes to 4 files (+1 heads) calling hook pretxnchangegroup.acl: hgext.acl.hook acl: checking access for user "astro" acl: acl.allow.branches not enabled @@ -2327,6 +2310,7 @@ bundle2-input-part: total payload size 48 bundle2-input-bundle: 4 parts total updating the branch cache + added 4 changesets with 4 changes to 4 files (+1 heads) bundle2-output-bundle: "HG20", 1 parts total bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload bundle2-input-bundle: no-transaction @@ -2391,7 +2375,6 @@ adding foo/Bar/file.txt revisions adding foo/file.txt revisions adding quux/file.py revisions - added 4 changesets with 4 changes to 4 files (+1 heads) calling hook pretxnchangegroup.acl: hgext.acl.hook acl: checking access for user "george" acl: acl.allow.branches not enabled
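Most of the `.t` churn in this and the following test files traces back to one behavior change: the `added N changesets ...` summary is no longer printed per changegroup but accumulated on the transaction and emitted at close, which is why it now follows `updating the branch cache`. A toy model of the new ordering (all names hypothetical)::

    class transaction(object):
        def __init__(self):
            self.changes = {'changesets': 0, 'changes': 0, 'files': set()}
            self._postclose = []

        def addpostclose(self, callback):
            self._postclose.append(callback)

        def close(self):
            for callback in self._postclose:
                callback(self)

    def applychangegroup(tr, changesets, changes, files):
        # record what this changegroup added; report nothing yet
        tr.changes['changesets'] += changesets
        tr.changes['changes'] += changes
        tr.changes['files'].update(files)

    tr = transaction()
    tr.addpostclose(lambda tr: print(
        'added %d changesets with %d changes to %d files'
        % (tr.changes['changesets'], tr.changes['changes'],
           len(tr.changes['files']))))
    applychangegroup(tr, 2, 2, {'a', 'b'})
    applychangegroup(tr, 1, 1, {'a'})
    print('updating the branch cache')  # now precedes the summary
    tr.close()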
--- a/tests/test-bisect.t	Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-bisect.t	Mon Sep 09 17:26:17 2019 -0400
@@ -581,6 +581,7 @@
   ---------------------
 
   $ hg debugobsolete `hg id --debug -i -r tip`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg bisect --reset
   $ hg bisect --good 15
@@ -609,6 +610,7 @@
   $ hg commit -m 'msg 30 -- fixed'
   created new head
   $ hg debugobsolete `hg id --debug -i -r 30` `hg id --debug -i -r .`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg bisect
   The first bad revision is:
--- a/tests/test-blackbox.t	Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-blackbox.t	Mon Sep 09 17:26:17 2019 -0400
@@ -140,7 +140,7 @@
   comparing with $TESTTMP/blackboxtest
   query 1; heads
   searching for changes
-  all local heads known remotely
+  all local changesets known remotely
   changeset:   2:d02f48003e62c24e2659d97d30f2a83abe5d5d51
   tag:         tip
   phase:       draft
--- a/tests/test-bookflow.t	Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-bookflow.t	Mon Sep 09 17:26:17 2019 -0400
@@ -242,8 +242,8 @@
   $ echo "more" >> test
   $ hg pull -u 2>&1 | fgrep -v TESTTMP| fgrep -v "searching for changes" | fgrep -v adding
   pulling from $TESTTMP/a
+  updating bookmark X
   added 1 changesets with 0 changes to 0 files (+1 heads)
-  updating bookmark X
   new changesets * (glob)
   updating to active bookmark X
   merging test
--- a/tests/test-bookmarks-corner-case.t	Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-bookmarks-corner-case.t	Mon Sep 09 17:26:17 2019 -0400
@@ -119,7 +119,7 @@
   > import atexit
   > import os
   > import time
-  > from mercurial import error, extensions, bookmarks
+  > from mercurial import bookmarks, error, extensions
   >
   > def wait(repo):
   >     if not os.path.exists('push-A-started'):
@@ -200,8 +200,8 @@
   $ cat push-output.txt
   pushing to ssh://user@dummy/bookrace-server
   searching for changes
+  remote: setting raced push up
   remote has heads on branch 'default' that are not known locally: f26c3b5167d1
-  remote: setting raced push up
   remote: adding changesets
   remote: adding manifests
   remote: adding file changes
--- a/tests/test-bookmarks-pushpull.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-bookmarks-pushpull.t Mon Sep 09 17:26:17 2019 -0400 @@ -51,10 +51,10 @@ adding changesets adding manifests adding file changes - added 1 changesets with 1 changes to 1 files adding remote bookmark X updating bookmark Y adding remote bookmark Z + added 1 changesets with 1 changes to 1 files new changesets 4e3505fd9583 (1 drafts) test-hook-bookmark: X: -> 4e3505fd95835d721066b76e75dbb8cc554d7f77 test-hook-bookmark: Y: 0000000000000000000000000000000000000000 -> 4e3505fd95835d721066b76e75dbb8cc554d7f77 @@ -414,10 +414,10 @@ adding changesets adding manifests adding file changes - added 1 changesets with 1 changes to 1 files (+1 heads) divergent bookmark @ stored as @foo divergent bookmark X stored as X@foo updating bookmark Z + added 1 changesets with 1 changes to 1 files (+1 heads) new changesets 0d2164f0ce0d (1 drafts) test-hook-bookmark: @foo: -> 0d2164f0ce0d8f1d6f94351eba04b794909be66c test-hook-bookmark: X@foo: -> 0d2164f0ce0d8f1d6f94351eba04b794909be66c @@ -580,8 +580,8 @@ adding changesets adding manifests adding file changes + updating bookmark Y added 1 changesets with 1 changes to 1 files - updating bookmark Y new changesets b0a5eff05604 (1 drafts) (run 'hg update' to get a working copy) $ hg book @@ -629,8 +629,8 @@ adding changesets adding manifests adding file changes + updating bookmark Y added 1 changesets with 1 changes to 1 files - updating bookmark Y new changesets 35d1ef0a8d1b (1 drafts) (run 'hg update' to get a working copy) $ hg book @@ -672,8 +672,8 @@ adding changesets adding manifests adding file changes + updating bookmark Y added 1 changesets with 1 changes to 1 files - updating bookmark Y new changesets 0d60821d2197 (1 drafts) (run 'hg update' to get a working copy) $ hg book @@ -742,6 +742,7 @@ Unrelated marker does not alter the decision $ hg debugobsolete aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb + 1 new obsolescence markers $ hg push http://localhost:$HGPORT2/ pushing to http://localhost:$HGPORT2/ searching for changes @@ -763,8 +764,10 @@ $ hg id --debug -r 5 c922c0139ca03858f655e4a2af4dd02796a63969 tip Y $ hg debugobsolete f6fc62dde3c0771e29704af56ba4d8af77abcc2f cccccccccccccccccccccccccccccccccccccccc + 1 new obsolescence markers obsoleted 1 changesets $ hg debugobsolete cccccccccccccccccccccccccccccccccccccccc 4efff6d98829d9c824c621afd6e3f01865f5439f + 1 new obsolescence markers $ hg push http://localhost:$HGPORT2/ pushing to http://localhost:$HGPORT2/ searching for changes
--- a/tests/test-bookmarks.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-bookmarks.t Mon Sep 09 17:26:17 2019 -0400 @@ -762,9 +762,9 @@ adding changesets adding manifests adding file changes - added 2 changesets with 2 changes to 2 files (+1 heads) updating bookmark Y updating bookmark Z + added 2 changesets with 2 changes to 2 files (+1 heads) new changesets 125c9a1d6df6:9ba5f110a0b3 (run 'hg heads' to see heads, 'hg merge' to merge) @@ -788,9 +788,9 @@ adding changesets adding manifests adding file changes - added 2 changesets with 2 changes to 2 files (+1 heads) updating bookmark Y updating bookmark Z + added 2 changesets with 2 changes to 2 files (+1 heads) new changesets 125c9a1d6df6:9ba5f110a0b3 updating to active bookmark Y 1 files updated, 0 files merged, 0 files removed, 0 files unresolved @@ -813,9 +813,9 @@ adding changesets adding manifests adding file changes - added 2 changesets with 2 changes to 2 files (+1 heads) updating bookmark Y updating bookmark Z + added 2 changesets with 2 changes to 2 files (+1 heads) new changesets 125c9a1d6df6:9ba5f110a0b3 (run 'hg heads' to see heads, 'hg merge' to merge) $ hg -R ../cloned-bookmarks-manual-update-with-divergence update @@ -996,11 +996,11 @@ adding changesets adding manifests adding file changes - added 1 changesets with 1 changes to 1 files divergent bookmark Z stored as Z@default adding remote bookmark foo adding remote bookmark four adding remote bookmark should-end-on-two + added 1 changesets with 1 changes to 1 files new changesets 5fb12f0f2d51 0 files updated, 0 files merged, 0 files removed, 0 files unresolved $ hg -R ../cloned-bookmarks-update parents -T "{rev}:{node|short}\n" @@ -1023,8 +1023,8 @@ adding changesets adding manifests adding file changes + divergent bookmark Z stored as Z@default added 1 changesets with 1 changes to 1 files - divergent bookmark Z stored as Z@default new changesets 81dcce76aa0b 1 files updated, 0 files merged, 0 files removed, 0 files unresolved updating bookmark Y
--- a/tests/test-bundle2-exchange.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-bundle2-exchange.t Mon Sep 09 17:26:17 2019 -0400 @@ -58,8 +58,8 @@ adding changesets adding manifests adding file changes + pre-close-tip:02de42196ebe draft added 8 changesets with 7 changes to 7 files (+3 heads) - pre-close-tip:02de42196ebe draft new changesets cd010b8cd998:02de42196ebe (8 drafts) postclose-tip:02de42196ebe draft txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NODE=cd010b8cd998f3981a5a8115f94f8da4ab506089 HG_NODE_LAST=02de42196ebee42ef284b6780a87cdc96e8eaab6 HG_PHASES_MOVED=1 HG_SOURCE=unbundle HG_TXNID=TXN:$ID$ HG_TXNNAME=unbundle @@ -75,10 +75,12 @@ $ hg -R main debugobsolete -d '0 0' 1111111111111111111111111111111111111111 `getmainid 9520eea781bc` pre-close-tip:02de42196ebe draft + 1 new obsolescence markers postclose-tip:02de42196ebe draft txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=debugobsolete $ hg -R main debugobsolete -d '0 0' 2222222222222222222222222222222222222222 `getmainid 24b6387c8c8c` pre-close-tip:02de42196ebe draft + 1 new obsolescence markers postclose-tip:02de42196ebe draft txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=debugobsolete @@ -92,9 +94,9 @@ adding changesets adding manifests adding file changes + pre-close-tip:9520eea781bc draft added 2 changesets with 2 changes to 2 files 1 new obsolescence markers - pre-close-tip:9520eea781bc draft new changesets cd010b8cd998:9520eea781bc (1 drafts) postclose-tip:9520eea781bc draft txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_NODE=cd010b8cd998f3981a5a8115f94f8da4ab506089 HG_NODE_LAST=9520eea781bcca16c1e15acc0ba14335a0e8e5ba HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_TXNNAME=pull @@ -121,9 +123,9 @@ adding changesets adding manifests adding file changes + pre-close-tip:24b6387c8c8c draft added 1 changesets with 1 changes to 1 files (+1 heads) 1 new obsolescence markers - pre-close-tip:24b6387c8c8c draft new changesets 24b6387c8c8c (1 drafts) postclose-tip:24b6387c8c8c draft txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_NODE=24b6387c8c8cae37178880f3fa95ded3cb1cf785 HG_NODE_LAST=24b6387c8c8cae37178880f3fa95ded3cb1cf785 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_TXNNAME=pull @@ -193,6 +195,7 @@ txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark $ hg -R main debugobsolete -d '0 0' 3333333333333333333333333333333333333333 `getmainid eea13746799a` pre-close-tip:02de42196ebe draft + 1 new obsolescence markers postclose-tip:02de42196ebe draft txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=debugobsolete $ hg -R main bookmark --rev 02de42196ebe book_02de @@ -201,6 +204,7 @@ txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark $ hg -R main debugobsolete -d '0 0' 4444444444444444444444444444444444444444 `getmainid 02de42196ebe` pre-close-tip:02de42196ebe draft book_02de + 1 new obsolescence markers postclose-tip:02de42196ebe draft book_02de txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=debugobsolete $ hg -R main bookmark --rev 42ccdea3bb16 book_42cc @@ -209,6 +213,7 @@ txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env 
HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark $ hg -R main debugobsolete -d '0 0' 5555555555555555555555555555555555555555 `getmainid 42ccdea3bb16` pre-close-tip:02de42196ebe draft book_02de + 1 new obsolescence markers postclose-tip:02de42196ebe draft book_02de txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=debugobsolete $ hg -R main bookmark --rev 5fddd98957c8 book_5fdd @@ -217,6 +222,7 @@ txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark $ hg -R main debugobsolete -d '0 0' 6666666666666666666666666666666666666666 `getmainid 5fddd98957c8` pre-close-tip:02de42196ebe draft book_02de + 1 new obsolescence markers postclose-tip:02de42196ebe draft book_02de txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=debugobsolete $ hg -R main bookmark --rev 32af7686d403 book_32af @@ -225,6 +231,7 @@ txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark $ hg -R main debugobsolete -d '0 0' 7777777777777777777777777777777777777777 `getmainid 32af7686d403` pre-close-tip:02de42196ebe draft book_02de + 1 new obsolescence markers postclose-tip:02de42196ebe draft book_02de txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=debugobsolete @@ -261,9 +268,9 @@ remote: adding changesets remote: adding manifests remote: adding file changes + remote: pre-close-tip:eea13746799a public book_eea1 remote: added 1 changesets with 0 changes to 0 files (-1 heads) remote: 1 new obsolescence markers - remote: pre-close-tip:eea13746799a public book_eea1 remote: pushkey: lock state after "bookmarks" remote: lock: free remote: wlock: free @@ -296,10 +303,10 @@ adding changesets adding manifests adding file changes + updating bookmark book_02de + pre-close-tip:02de42196ebe draft book_02de added 1 changesets with 1 changes to 1 files (+1 heads) 1 new obsolescence markers - updating bookmark book_02de - pre-close-tip:02de42196ebe draft book_02de new changesets 02de42196ebe (1 drafts) postclose-tip:02de42196ebe draft book_02de txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_NODE=02de42196ebee42ef284b6780a87cdc96e8eaab6 HG_NODE_LAST=02de42196ebee42ef284b6780a87cdc96e8eaab6 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_TXNNAME=pull @@ -322,10 +329,10 @@ adding changesets adding manifests adding file changes + updating bookmark book_42cc + pre-close-tip:42ccdea3bb16 draft book_42cc added 1 changesets with 1 changes to 1 files (+1 heads) 1 new obsolescence markers - updating bookmark book_42cc - pre-close-tip:42ccdea3bb16 draft book_42cc new changesets 42ccdea3bb16 (1 drafts) postclose-tip:42ccdea3bb16 draft book_42cc txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_NODE=42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 HG_NODE_LAST=42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_TXNNAME=pull @@ -347,9 +354,9 @@ remote: adding changesets remote: adding manifests remote: adding file changes + remote: pre-close-tip:5fddd98957c8 draft book_5fdd remote: added 1 changesets with 1 changes to 1 files remote: 1 new obsolescence markers - remote: pre-close-tip:5fddd98957c8 draft book_5fdd remote: pushkey: lock state after "bookmarks" remote: lock: free remote: wlock: 
free @@ -398,9 +405,9 @@ remote: adding changesets remote: adding manifests remote: adding file changes + remote: pre-close-tip:32af7686d403 public book_32af remote: added 1 changesets with 1 changes to 1 files remote: 1 new obsolescence markers - remote: pre-close-tip:32af7686d403 public book_32af remote: pushkey: lock state after "bookmarks" remote: lock: free remote: wlock: free @@ -624,7 +631,6 @@ remote: adding changesets remote: adding manifests remote: adding file changes - remote: added 1 changesets with 1 changes to 1 files remote: pre-close-tip:e7ec4e813ba6 draft remote: You shall not pass! remote: transaction abort! @@ -639,7 +645,6 @@ remote: adding changesets remote: adding manifests remote: adding file changes - remote: added 1 changesets with 1 changes to 1 files remote: pre-close-tip:e7ec4e813ba6 draft remote: You shall not pass! remote: transaction abort! @@ -655,7 +660,6 @@ remote: adding changesets remote: adding manifests remote: adding file changes - remote: added 1 changesets with 1 changes to 1 files remote: pre-close-tip:e7ec4e813ba6 draft remote: You shall not pass! remote: transaction abort! @@ -689,7 +693,6 @@ remote: adding changesets remote: adding manifests remote: adding file changes - remote: added 1 changesets with 1 changes to 1 files remote: Fail early! remote: transaction abort! remote: Cleaning up the mess... @@ -702,7 +705,6 @@ remote: adding changesets remote: adding manifests remote: adding file changes - remote: added 1 changesets with 1 changes to 1 files remote: Fail early! remote: transaction abort! remote: Cleaning up the mess... @@ -716,7 +718,6 @@ remote: adding changesets remote: adding manifests remote: adding file changes - remote: added 1 changesets with 1 changes to 1 files remote: Fail early! remote: transaction abort! remote: Cleaning up the mess... @@ -740,7 +741,6 @@ adding changesets adding manifests adding file changes - added 1 changesets with 1 changes to 1 files Fail early! transaction abort! Cleaning up the mess... @@ -753,7 +753,6 @@ remote: adding changesets remote: adding manifests remote: adding file changes - remote: added 1 changesets with 1 changes to 1 files remote: Fail early! remote: transaction abort! remote: Cleaning up the mess... @@ -767,7 +766,6 @@ remote: adding changesets remote: adding manifests remote: adding file changes - remote: added 1 changesets with 1 changes to 1 files remote: Fail early! remote: transaction abort! remote: Cleaning up the mess... @@ -815,7 +813,6 @@ adding changesets adding manifests adding file changes - added 1 changesets with 1 changes to 1 files do not push the key ! pushkey-abort: prepushkey.failpush hook exited with status 1 transaction abort! @@ -829,7 +826,6 @@ remote: adding changesets remote: adding manifests remote: adding file changes - remote: added 1 changesets with 1 changes to 1 files remote: do not push the key ! remote: pushkey-abort: prepushkey.failpush hook exited with status 1 remote: transaction abort! @@ -843,7 +839,6 @@ remote: adding changesets remote: adding manifests remote: adding file changes - remote: added 1 changesets with 1 changes to 1 files remote: do not push the key ! remote: pushkey-abort: prepushkey.failpush hook exited with status 1 remote: transaction abort! @@ -885,7 +880,6 @@ adding changesets adding manifests adding file changes - added 1 changesets with 1 changes to 1 files transaction abort! Cleaning up the mess... 
rollback completed @@ -900,7 +894,6 @@ remote: adding changesets remote: adding manifests remote: adding file changes - remote: added 1 changesets with 1 changes to 1 files remote: transaction abort! remote: Cleaning up the mess... remote: rollback completed @@ -915,7 +908,6 @@ remote: adding changesets remote: adding manifests remote: adding file changes - remote: added 1 changesets with 1 changes to 1 files remote: transaction abort! remote: Cleaning up the mess... remote: rollback completed
--- a/tests/test-bundle2-format.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-bundle2-format.t Mon Sep 09 17:26:17 2019 -0400 @@ -1010,6 +1010,7 @@ $ hg bundle2 --rev '8+7+5+4' --reply ../rev-rr.hg2 $ hg unbundle2 ../rev-reply.hg2 < ../rev-rr.hg2 + added 0 changesets with 0 changes to 3 files 0 unread bytes addchangegroup return: 1 @@ -1021,13 +1022,11 @@ 0030: 2d 74 6f 31 72 65 74 75 72 6e 31 00 00 00 00 00 |-to1return1.....| 0040: 00 00 1b 06 6f 75 74 70 75 74 00 00 00 01 00 01 |....output......| 0050: 0b 01 69 6e 2d 72 65 70 6c 79 2d 74 6f 31 00 00 |..in-reply-to1..| - 0060: 00 64 61 64 64 69 6e 67 20 63 68 61 6e 67 65 73 |.dadding changes| + 0060: 00 37 61 64 64 69 6e 67 20 63 68 61 6e 67 65 73 |.7adding changes| 0070: 65 74 73 0a 61 64 64 69 6e 67 20 6d 61 6e 69 66 |ets.adding manif| 0080: 65 73 74 73 0a 61 64 64 69 6e 67 20 66 69 6c 65 |ests.adding file| - 0090: 20 63 68 61 6e 67 65 73 0a 61 64 64 65 64 20 30 | changes.added 0| - 00a0: 20 63 68 61 6e 67 65 73 65 74 73 20 77 69 74 68 | changesets with| - 00b0: 20 30 20 63 68 61 6e 67 65 73 20 74 6f 20 33 20 | 0 changes to 3 | - 00c0: 66 69 6c 65 73 0a 00 00 00 00 00 00 00 00 |files.........| + 0090: 20 63 68 61 6e 67 65 73 0a 00 00 00 00 00 00 00 | changes........| + 00a0: 00 |.| Check handling of exception during generation. ----------------------------------------------
--- a/tests/test-bundle2-multiple-changegroups.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-bundle2-multiple-changegroups.t Mon Sep 09 17:26:17 2019 -0400 @@ -80,7 +80,6 @@ adding changesets adding manifests adding file changes - added 1 changesets with 1 changes to 1 files pretxnchangegroup hook: HG_HOOKNAME=pretxnchangegroup HG_HOOKTYPE=pretxnchangegroup HG_NODE=27547f69f25460a52fff66ad004e58da7ad3fb56 @@ -96,7 +95,6 @@ adding changesets adding manifests adding file changes - added 1 changesets with 1 changes to 1 files pretxnchangegroup hook: HG_HOOKNAME=pretxnchangegroup HG_HOOKTYPE=pretxnchangegroup HG_NODE=f838bfaca5c7226600ebcfd84f3c3c13a28d3757 @@ -109,6 +107,7 @@ file:/*/$TESTTMP/repo (glob) HG_URL=file:$TESTTMP/repo + added 2 changesets with 2 changes to 2 files new changesets 27547f69f254:f838bfaca5c7 changegroup hook: HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup @@ -208,7 +207,6 @@ adding changesets adding manifests adding file changes - added 2 changesets with 2 changes to 2 files (+1 heads) pretxnchangegroup hook: HG_HOOKNAME=pretxnchangegroup HG_HOOKTYPE=pretxnchangegroup HG_NODE=b3325c91a4d916bcc4cdc83ea3fe4ece46a42f6e @@ -224,7 +222,6 @@ adding changesets adding manifests adding file changes - added 3 changesets with 3 changes to 3 files (+1 heads) pretxnchangegroup hook: HG_HOOKNAME=pretxnchangegroup HG_HOOKTYPE=pretxnchangegroup HG_NODE=7f219660301fe4c8a116f714df5e769695cc2b46 @@ -237,6 +234,7 @@ file:/*/$TESTTMP/repo (glob) HG_URL=file:$TESTTMP/repo + added 5 changesets with 5 changes to 5 files (+2 heads) new changesets b3325c91a4d9:5cd59d311f65 changegroup hook: HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup @@ -365,7 +363,6 @@ adding changesets adding manifests adding file changes - added 1 changesets with 0 changes to 0 files (-1 heads) pretxnchangegroup hook: HG_HOOKNAME=pretxnchangegroup HG_HOOKTYPE=pretxnchangegroup HG_NODE=71bd7b46de72e69a32455bf88d04757d542e6cf4 @@ -381,7 +378,6 @@ adding changesets adding manifests adding file changes - added 1 changesets with 1 changes to 1 files pretxnchangegroup hook: HG_HOOKNAME=pretxnchangegroup HG_HOOKTYPE=pretxnchangegroup HG_NODE=9d18e5bd9ab09337802595d49f1dad0c98df4d84 @@ -394,6 +390,7 @@ file:/*/$TESTTMP/repo (glob) HG_URL=file:$TESTTMP/repo + added 2 changesets with 1 changes to 1 files (-1 heads) new changesets 71bd7b46de72:9d18e5bd9ab0 changegroup hook: HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup
--- a/tests/test-bundle2-remote-changegroup.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-bundle2-remote-changegroup.t Mon Sep 09 17:26:17 2019 -0400 @@ -202,12 +202,11 @@ adding changesets adding manifests adding file changes - added 2 changesets with 2 changes to 2 files (+1 heads) remote: changegroup adding changesets adding manifests adding file changes - added 3 changesets with 2 changes to 2 files (+1 heads) + added 5 changesets with 4 changes to 4 files (+2 heads) new changesets 32af7686d403:02de42196ebe (run 'hg heads' to see heads, 'hg merge' to merge) $ hg -R clone log -G @@ -252,12 +251,11 @@ adding changesets adding manifests adding file changes - added 2 changesets with 2 changes to 2 files (+1 heads) remote: remote-changegroup adding changesets adding manifests adding file changes - added 3 changesets with 2 changes to 2 files (+1 heads) + added 5 changesets with 4 changes to 4 files (+2 heads) new changesets 32af7686d403:02de42196ebe (run 'hg heads' to see heads, 'hg merge' to merge) $ hg -R clone log -G @@ -305,17 +303,15 @@ adding changesets adding manifests adding file changes - added 2 changesets with 2 changes to 2 files (+1 heads) remote: remote-changegroup adding changesets adding manifests adding file changes - added 2 changesets with 1 changes to 1 files remote: changegroup adding changesets adding manifests adding file changes - added 1 changesets with 1 changes to 1 files (+1 heads) + added 5 changesets with 4 changes to 4 files (+2 heads) new changesets 32af7686d403:02de42196ebe (run 'hg heads' to see heads, 'hg merge' to merge) $ hg -R clone log -G @@ -383,7 +379,6 @@ adding changesets adding manifests adding file changes - added 8 changesets with 7 changes to 7 files (+2 heads) transaction abort! rollback completed abort: bundle at http://localhost:$HGPORT/bundle6.hg is corrupted: @@ -418,7 +413,6 @@ adding changesets adding manifests adding file changes - added 8 changesets with 7 changes to 7 files (+2 heads) transaction abort! rollback completed abort: bundle at http://localhost:$HGPORT/bundle6.hg is corrupted: @@ -434,7 +428,6 @@ adding changesets adding manifests adding file changes - added 8 changesets with 7 changes to 7 files (+2 heads) transaction abort! rollback completed abort: bundle at http://localhost:$HGPORT/bundle6.hg is corrupted: @@ -464,12 +457,10 @@ adding changesets adding manifests adding file changes - added 2 changesets with 2 changes to 2 files (+1 heads) remote: remote-changegroup adding changesets adding manifests adding file changes - added 2 changesets with 1 changes to 1 files transaction abort! rollback completed abort: bundle at http://localhost:$HGPORT/bundle5.hg is corrupted: @@ -534,7 +525,6 @@ adding changesets adding manifests adding file changes - added 2 changesets with 2 changes to 2 files (+1 heads) transaction abort! rollback completed abort: bundle at http://localhost:$HGPORT/bundle4.hg is corrupted:
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-byteify-strings.t Mon Sep 09 17:26:17 2019 -0400 @@ -0,0 +1,266 @@ +#require py3 + + $ byteify_strings () { + > $PYTHON "$TESTDIR/../contrib/byteify-strings.py" "$@" + > } + +Test version + + $ byteify_strings --version + Byteify strings * (glob) + +Test in-place + + $ cat > testfile.py <<EOF + > obj['test'] = b"1234" + > mydict.iteritems() + > EOF + $ byteify_strings testfile.py -i + $ cat testfile.py + obj[b'test'] = b"1234" + mydict.iteritems() + +Test with dictiter + + $ cat > testfile.py <<EOF + > obj['test'] = b"1234" + > mydict.iteritems() + > EOF + $ byteify_strings testfile.py --dictiter + obj[b'test'] = b"1234" + mydict.items() + +Test kwargs-like objects + + $ cat > testfile.py <<EOF + > kwargs['test'] = "123" + > kwargs[test['testing']] + > kwargs[test[[['testing']]]] + > kwargs[kwargs['testing']] + > kwargs.get('test') + > kwargs.pop('test') + > kwargs.get('test', 'testing') + > kwargs.pop('test', 'testing') + > kwargs.setdefault('test', 'testing') + > + > opts['test'] = "123" + > opts[test['testing']] + > opts[test[[['testing']]]] + > opts[opts['testing']] + > opts.get('test') + > opts.pop('test') + > opts.get('test', 'testing') + > opts.pop('test', 'testing') + > opts.setdefault('test', 'testing') + > + > commitopts['test'] = "123" + > commitopts[test['testing']] + > commitopts[test[[['testing']]]] + > commitopts[commitopts['testing']] + > commitopts.get('test') + > commitopts.pop('test') + > commitopts.get('test', 'testing') + > commitopts.pop('test', 'testing') + > commitopts.setdefault('test', 'testing') + > EOF + $ byteify_strings testfile.py --treat-as-kwargs kwargs opts commitopts + kwargs['test'] = b"123" + kwargs[test[b'testing']] + kwargs[test[[[b'testing']]]] + kwargs[kwargs['testing']] + kwargs.get('test') + kwargs.pop('test') + kwargs.get('test', b'testing') + kwargs.pop('test', b'testing') + kwargs.setdefault('test', b'testing') + + opts['test'] = b"123" + opts[test[b'testing']] + opts[test[[[b'testing']]]] + opts[opts['testing']] + opts.get('test') + opts.pop('test') + opts.get('test', b'testing') + opts.pop('test', b'testing') + opts.setdefault('test', b'testing') + + commitopts['test'] = b"123" + commitopts[test[b'testing']] + commitopts[test[[[b'testing']]]] + commitopts[commitopts['testing']] + commitopts.get('test') + commitopts.pop('test') + commitopts.get('test', b'testing') + commitopts.pop('test', b'testing') + commitopts.setdefault('test', b'testing') + +Test attr*() as methods + + $ cat > testfile.py <<EOF + > setattr(o, 'a', 1) + > util.setattr(o, 'ae', 1) + > util.getattr(o, 'alksjdf', 'default') + > util.addattr(o, 'asdf') + > util.hasattr(o, 'lksjdf', 'default') + > util.safehasattr(o, 'lksjdf', 'default') + > @eh.wrapfunction(func, 'lksjdf') + > def f(): + > pass + > @eh.wrapclass(klass, 'lksjdf') + > def f(): + > pass + > EOF + $ byteify_strings testfile.py --allow-attr-methods + setattr(o, 'a', 1) + util.setattr(o, 'ae', 1) + util.getattr(o, 'alksjdf', b'default') + util.addattr(o, 'asdf') + util.hasattr(o, 'lksjdf', b'default') + util.safehasattr(o, 'lksjdf', b'default') + @eh.wrapfunction(func, 'lksjdf') + def f(): + pass + @eh.wrapclass(klass, 'lksjdf') + def f(): + pass + +Test without attr*() as methods + + $ cat > testfile.py <<EOF + > setattr(o, 'a', 1) + > util.setattr(o, 'ae', 1) + > util.getattr(o, 'alksjdf', 'default') + > util.addattr(o, 'asdf') + > util.hasattr(o, 'lksjdf', 'default') + > util.safehasattr(o, 'lksjdf', 'default') + > @eh.wrapfunction(func, 'lksjdf') + > 
def f(): + > pass + > @eh.wrapclass(klass, 'lksjdf') + > def f(): + > pass + > EOF + $ byteify_strings testfile.py + setattr(o, 'a', 1) + util.setattr(o, b'ae', 1) + util.getattr(o, b'alksjdf', b'default') + util.addattr(o, b'asdf') + util.hasattr(o, b'lksjdf', b'default') + util.safehasattr(o, b'lksjdf', b'default') + @eh.wrapfunction(func, b'lksjdf') + def f(): + pass + @eh.wrapclass(klass, b'lksjdf') + def f(): + pass + +Test ignore comments + + $ cat > testfile.py <<EOF + > # py3-transform: off + > "none" + > "of" + > 'these' + > s = """should""" + > d = '''be''' + > # py3-transform: on + > "this should" + > 'and this also' + > + > # no-py3-transform + > l = "this should be ignored" + > l2 = "this shouldn't" + > + > EOF + $ byteify_strings testfile.py + # py3-transform: off + "none" + "of" + 'these' + s = """should""" + d = '''be''' + # py3-transform: on + b"this should" + b'and this also' + + # no-py3-transform + l = "this should be ignored" + l2 = b"this shouldn't" + +Test triple-quoted strings + + $ cat > testfile.py <<EOF + > """This is ignored + > """ + > + > line = """ + > This should not be + > """ + > line = ''' + > Neither should this + > ''' + > EOF + $ byteify_strings testfile.py + """This is ignored + """ + + line = b""" + This should not be + """ + line = b''' + Neither should this + ''' + +Test prefixed strings + + $ cat > testfile.py <<EOF + > obj['test'] = b"1234" + > obj[r'test'] = u"1234" + > EOF + $ byteify_strings testfile.py + obj[b'test'] = b"1234" + obj[r'test'] = u"1234" + +Test multi-line alignment + + $ cat > testfile.py <<'EOF' + > def foo(): + > error.Abort(_("foo" + > "bar" + > "%s") + > % parameter) + > { + > 'test': dict, + > 'test2': dict, + > } + > [ + > "thing", + > "thing2" + > ] + > ( + > "tuple", + > "tuple2", + > ) + > {"thing", + > } + > EOF + $ byteify_strings testfile.py + def foo(): + error.Abort(_(b"foo" + b"bar" + b"%s") + % parameter) + { + b'test': dict, + b'test2': dict, + } + [ + b"thing", + b"thing2" + ] + ( + b"tuple", + b"tuple2", + ) + {b"thing", + }
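The tool under test rewrites Python source at the token level. Below is a much-simplified sketch of the core idea; the real `contrib/byteify-strings.py` additionally handles kwargs-like objects, `attr*()` methods, transform-suppressing comments, and multi-line alignment, as exercised above::

    import io
    import tokenize

    def byteify(source):
        """Prefix unprefixed string literals with b'' (naive sketch)."""
        out = []
        for tok in tokenize.generate_tokens(io.StringIO(source).readline):
            if tok.type == tokenize.STRING and tok.string[0] in '"\'':
                out.append((tok.type, 'b' + tok.string))
            else:
                out.append((tok.type, tok.string))
        # untokenize may normalize some whitespace; fine for a sketch
        return tokenize.untokenize(out)

    print(byteify("obj['test'] = \"1234\"\n"))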
--- a/tests/test-cache-abuse.t	Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-cache-abuse.t	Mon Sep 09 17:26:17 2019 -0400
@@ -24,6 +24,7 @@
   $ echo dumb > dumb
   $ hg ci -qAmdumb
   $ hg debugobsolete b1174d11b69e63cb0c5726621a43c859f0858d7f
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg phase -pr t1
--- a/tests/test-check-code.t	Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-check-code.t	Mon Sep 09 17:26:17 2019 -0400
@@ -16,6 +16,7 @@
   Skipping contrib/automation/hgautomation/aws.py it has no-che?k-code (glob)
   Skipping contrib/automation/hgautomation/cli.py it has no-che?k-code (glob)
   Skipping contrib/automation/hgautomation/linux.py it has no-che?k-code (glob)
+  Skipping contrib/automation/hgautomation/pypi.py it has no-che?k-code (glob)
   Skipping contrib/automation/hgautomation/ssh.py it has no-che?k-code (glob)
   Skipping contrib/automation/hgautomation/windows.py it has no-che?k-code (glob)
   Skipping contrib/automation/hgautomation/winrm.py it has no-che?k-code (glob)
--- a/tests/test-check-interfaces.py	Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-check-interfaces.py	Mon Sep 09 17:26:17 2019 -0400
@@ -14,6 +14,9 @@
                 'test-repo']):
     sys.exit(80)
 
+from mercurial.interfaces import (
+    repository,
+)
 from mercurial.thirdparty.zope import (
     interface as zi,
 )
@@ -27,7 +30,6 @@
     localrepo,
     manifest,
     pycompat,
-    repository,
     revlog,
     sshpeer,
     statichttprepo,
--- a/tests/test-clone-uncompressed.t	Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-clone-uncompressed.t	Mon Sep 09 17:26:17 2019 -0400
@@ -537,6 +537,7 @@
   $ echo foo > foo
   $ hg -q commit -m 'about to be pruned'
   $ hg debugobsolete `hg log -r . -T '{node}'` -d '0 0' -u test --record-parents
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg up null -q
   $ hg log -T '{rev}: {phase}\n'
--- a/tests/test-clone.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-clone.t Mon Sep 09 17:26:17 2019 -0400 @@ -759,6 +759,7 @@ $ echo initial2 > foo $ hg -q commit -A -m initial1 $ hg debugobsolete c05d5c47a5cf81401869999f3d05f7d699d2b29a e082c1832e09a7d1e78b7fd49a592d372de854c8 + 1 new obsolescence markers obsoleted 1 changesets $ cd .. @@ -867,9 +868,9 @@ adding changesets adding manifests adding file changes - added 4 changesets with 4 changes to 1 files (+4 heads) adding remote bookmark head1 adding remote bookmark head2 + added 4 changesets with 4 changes to 1 files (+4 heads) new changesets 4a8dc1ab4c13:6bacf4683960 updating working directory 1 files updated, 0 files merged, 0 files removed, 0 files unresolved @@ -995,9 +996,9 @@ adding changesets adding manifests adding file changes - added 1 changesets with 1 changes to 1 files (+1 heads) adding remote bookmark head1 adding remote bookmark head2 + added 1 changesets with 1 changes to 1 files (+1 heads) new changesets 99f71071f117 updating working directory 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
--- a/tests/test-commandserver.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-commandserver.t Mon Sep 09 17:26:17 2019 -0400 @@ -549,6 +549,7 @@ *** runcommand up null 0 files updated, 0 files merged, 1 files removed, 0 files unresolved *** runcommand phase -df tip + 1 new obsolescence markers obsoleted 1 changesets *** runcommand log --hidden changeset: 1:731265503d86
--- a/tests/test-completion.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-completion.t Mon Sep 09 17:26:17 2019 -0400 @@ -312,7 +312,7 @@ debuguigetpass: prompt debuguiprompt: prompt debugupdatecaches: - debugupgraderepo: optimize, run, backup + debugupgraderepo: optimize, run, backup, changelog, manifest debugwalk: include, exclude debugwhyunstable: debugwireargs: three, four, five, ssh, remotecmd, insecure
--- a/tests/test-config.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-config.t Mon Sep 09 17:26:17 2019 -0400 @@ -57,11 +57,13 @@ $ hg showconfig Section -Tjson [ { + "defaultvalue": null, "name": "Section.KeY", "source": "*.hgrc:*", (glob) "value": "Case Sensitive" }, { + "defaultvalue": null, "name": "Section.key", "source": "*.hgrc:*", (glob) "value": "lower case" @@ -70,14 +72,15 @@ $ hg showconfig Section.KeY -Tjson [ { + "defaultvalue": null, "name": "Section.KeY", "source": "*.hgrc:*", (glob) "value": "Case Sensitive" } ] $ hg showconfig -Tjson | tail -7 - }, { + "defaultvalue": null, "name": "*", (glob) "source": "*", (glob) "value": "*" (glob) @@ -102,6 +105,7 @@ $ hg config empty.source -Tjson [ { + "defaultvalue": null, "name": "empty.source", "source": "", "value": "value"
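Since every item now carries a defaultvalue key (null when no default is registered), template consumers can compare settings against their defaults mechanically. A small standalone sketch of such a consumer (the hg invocation and the filtering are illustrative, not part of the test):

    import json
    import subprocess

    # Assumes an 'hg' binary on PATH and a repository in the cwd.
    out = subprocess.run(['hg', 'config', '-Tjson'],
                         capture_output=True, text=True, check=True).stdout
    for item in json.loads(out):
        # Each item reports name, source, value and (new in this change)
        # defaultvalue.
        if item['value'] != item['defaultvalue']:
            print('%s = %s (from %s)'
                  % (item['name'], item['value'], item['source']))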
--- a/tests/test-copies-in-changeset.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-copies-in-changeset.t Mon Sep 09 17:26:17 2019 -0400 @@ -10,6 +10,7 @@ > showcopies = log -r . -T '{file_copies % "{source} -> {name}\n"}' > [extensions] > rebase = + > split = > EOF Check that copies are recorded correctly @@ -132,6 +133,16 @@ a -> j $ hg showcopies --config experimental.copies.read-from=filelog-only a -> j +Existing copy information in the changeset gets removed on amend and writing +copy information on to the filelog + $ hg ci --amend -m 'copy a to j, v2' \ + > --config experimental.copies.write-to=filelog-only + saved backup bundle to $TESTTMP/repo/.hg/strip-backup/dd7bb9581359-a6e6b6d2-amend.hg + $ hg changesetcopies + files: j + + $ hg showcopies --config experimental.copies.read-from=filelog-only + a -> j The entries should be written to extras even if they're empty (so the client won't have to fall back to reading from filelogs) $ echo x >> j @@ -185,3 +196,56 @@ a R a $ cd .. + +Test splitting a commit + + $ hg init split + $ cd split + $ echo a > a + $ echo b > b + $ hg ci -Aqm 'add a and b' + $ echo a2 > a + $ hg mv b c + $ hg ci -m 'modify a, move b to c' + $ hg --config ui.interactive=yes split <<EOF + > y + > y + > n + > y + > EOF + diff --git a/a b/a + 1 hunks, 1 lines changed + examine changes to 'a'? + (enter ? for help) [Ynesfdaq?] y + + @@ -1,1 +1,1 @@ + -a + +a2 + record this change to 'a'? + (enter ? for help) [Ynesfdaq?] y + + diff --git a/b b/c + rename from b + rename to c + examine changes to 'b' and 'c'? + (enter ? for help) [Ynesfdaq?] n + + created new head + diff --git a/b b/c + rename from b + rename to c + examine changes to 'b' and 'c'? + (enter ? for help) [Ynesfdaq?] y + + saved backup bundle to $TESTTMP/split/.hg/strip-backup/9a396d463e04-2d9e6864-split.hg + $ cd .. + +Test committing half a rename + + $ hg init partial + $ cd partial + $ echo a > a + $ hg ci -Aqm 'add a' + $ hg mv a b + $ hg ci -m 'remove a' a + $ cd ..
--- a/tests/test-debugcommands.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-debugcommands.t Mon Sep 09 17:26:17 2019 -0400 @@ -546,7 +546,12 @@ .hg/cache/rbc-revs-v1 .hg/cache/rbc-names-v1 .hg/cache/hgtagsfnodes1 + .hg/cache/branch2-visible-hidden + .hg/cache/branch2-visible + .hg/cache/branch2-served.hidden .hg/cache/branch2-served + .hg/cache/branch2-immutable + .hg/cache/branch2-base Test debugcolor
--- a/tests/test-eol-hook.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-eol-hook.t Mon Sep 09 17:26:17 2019 -0400 @@ -39,7 +39,6 @@ adding changesets adding manifests adding file changes - added 1 changesets with 1 changes to 1 files error: pretxnchangegroup hook failed: end-of-line check failed: a.txt in a8ee6548cd86 should not have CRLF line endings transaction abort! @@ -67,7 +66,6 @@ adding changesets adding manifests adding file changes - added 1 changesets with 1 changes to 1 files error: pretxnchangegroup hook failed: end-of-line check failed: crlf.txt in 004ba2132725 should not have LF line endings transaction abort! @@ -95,7 +93,6 @@ adding changesets adding manifests adding file changes - added 1 changesets with 1 changes to 1 files error: pretxnchangegroup hook failed: end-of-line check failed: b.txt in fbcf9b1025f5 should not have CRLF line endings transaction abort! @@ -116,7 +113,6 @@ adding changesets adding manifests adding file changes - added 2 changesets with 2 changes to 2 files (+1 heads) error: pretxnchangegroup hook failed: end-of-line check failed: b.txt in fbcf9b1025f5 should not have CRLF line endings transaction abort! @@ -137,7 +133,6 @@ adding changesets adding manifests adding file changes - added 2 changesets with 2 changes to 2 files (+1 heads) error: pretxnchangegroup hook failed: end-of-line check failed: b.txt in fbcf9b1025f5 should not have CRLF line endings transaction abort! @@ -174,7 +169,6 @@ adding changesets adding manifests adding file changes - added 3 changesets with 3 changes to 2 files (+1 heads) error: pretxnchangegroup hook failed: end-of-line check failed: b.txt in fbcf9b1025f5 should not have CRLF line endings transaction abort! @@ -204,7 +198,6 @@ adding changesets adding manifests adding file changes - added 3 changesets with 3 changes to 2 files (+1 heads) error: pretxnchangegroup hook failed: end-of-line check failed: b.txt in fbcf9b1025f5 should not have CRLF line endings d.txt in a7040e68714f should not have CRLF line endings
--- a/tests/test-exchange-obsmarkers-case-A1.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-exchange-obsmarkers-case-A1.t Mon Sep 09 17:26:17 2019 -0400 @@ -53,6 +53,7 @@ $ cd main $ mkcommit A $ hg debugobsolete aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa `getid 'desc(A)'` + 1 new obsolescence markers $ hg log -G @ f5bc6836db60 (draft): A | @@ -201,6 +202,7 @@ o a9bdc8b26820 (public): O $ hg debugobsolete aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa `getid 'desc(A)'` + 1 new obsolescence markers $ inspect_obsmarkers obsstore content ================
--- a/tests/test-exchange-obsmarkers-case-A2.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-exchange-obsmarkers-case-A2.t Mon Sep 09 17:26:17 2019 -0400 @@ -56,11 +56,13 @@ $ cd main $ mkcommit A $ hg debugobsolete aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa `getid 'desc(A)'` + 1 new obsolescence markers $ hg up '.~1' 0 files updated, 0 files merged, 1 files removed, 0 files unresolved $ mkcommit B created new head $ hg debugobsolete bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb `getid 'desc(B)'` + 1 new obsolescence markers $ hg log -G @ 35b183996678 (draft): B |
--- a/tests/test-exchange-obsmarkers-case-A3.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-exchange-obsmarkers-case-A3.t Mon Sep 09 17:26:17 2019 -0400 @@ -73,9 +73,11 @@ $ mkcommit B1 created new head $ hg debugobsolete `getid 'desc(A0)'` `getid 'desc(A1)'` + 1 new obsolescence markers obsoleted 1 changesets 1 new orphan changesets $ hg debugobsolete `getid 'desc(B0)'` `getid 'desc(B1)'` + 1 new obsolescence markers obsoleted 1 changesets $ hg log -G --hidden @ f6298a8ac3a4 (draft): B1 @@ -163,9 +165,11 @@ $ mkcommit B1 created new head $ hg debugobsolete `getid 'desc(A0)'` `getid 'desc(A1)'` + 1 new obsolescence markers obsoleted 1 changesets 1 new orphan changesets $ hg debugobsolete `getid 'desc(B0)'` `getid 'desc(B1)'` + 1 new obsolescence markers obsoleted 1 changesets $ hg log -G --hidden @ f6298a8ac3a4 (draft): B1
--- a/tests/test-exchange-obsmarkers-case-A4.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-exchange-obsmarkers-case-A4.t Mon Sep 09 17:26:17 2019 -0400 @@ -63,7 +63,9 @@ $ mkcommit A1 created new head $ hg debugobsolete aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa `getid 'desc(A0)'` + 1 new obsolescence markers $ hg debugobsolete `getid 'desc(A0)'` `getid 'desc(A1)'` + 1 new obsolescence markers obsoleted 1 changesets 1 new orphan changesets $ hg log -G --hidden
--- a/tests/test-exchange-obsmarkers-case-A5.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-exchange-obsmarkers-case-A5.t Mon Sep 09 17:26:17 2019 -0400 @@ -65,9 +65,12 @@ created new head $ mkcommit A1 $ hg debugobsolete aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa `getid 'desc(A0)'` + 1 new obsolescence markers $ hg debugobsolete `getid 'desc(B0)'` `getid 'desc(B1)'` + 1 new obsolescence markers obsoleted 1 changesets $ hg debugobsolete `getid 'desc(A0)'` `getid 'desc(A1)'` + 1 new obsolescence markers obsoleted 1 changesets $ hg log -G --hidden @ 8c0a98c83722 (draft): A1
--- a/tests/test-exchange-obsmarkers-case-A6.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-exchange-obsmarkers-case-A6.t Mon Sep 09 17:26:17 2019 -0400 @@ -64,6 +64,7 @@ create a marker after this $ hg debugobsolete `getid 'desc(A0)'` `getid 'desc(A1)'` + 1 new obsolescence markers obsoleted 1 changesets $ hg log -G --hidden @ e5ea8f9c7314 (draft): A1
--- a/tests/test-exchange-obsmarkers-case-A7.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-exchange-obsmarkers-case-A7.t Mon Sep 09 17:26:17 2019 -0400 @@ -51,6 +51,7 @@ $ hg push -q ../pushdest $ hg push -q ../pulldest $ hg debugobsolete aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa `getid 'desc(A)'` + 1 new obsolescence markers $ hg log -G --hidden @ f5bc6836db60 (draft): A |
--- a/tests/test-exchange-obsmarkers-case-B5.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-exchange-obsmarkers-case-B5.t Mon Sep 09 17:26:17 2019 -0400 @@ -70,10 +70,13 @@ created new head $ mkcommit B1 $ hg debugobsolete --hidden `getid 'desc(A0)'` `getid 'desc(A1)'` + 1 new obsolescence markers obsoleted 1 changesets 2 new orphan changesets $ hg debugobsolete --hidden aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa `getid 'desc(B0)'` + 1 new obsolescence markers $ hg debugobsolete --hidden `getid 'desc(B0)'` `getid 'desc(B1)'` + 1 new obsolescence markers obsoleted 1 changesets $ hg prune -qd '0 0' 'desc(B1)' $ hg log -G --hidden
--- a/tests/test-exchange-obsmarkers-case-B6.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-exchange-obsmarkers-case-B6.t Mon Sep 09 17:26:17 2019 -0400 @@ -57,6 +57,7 @@ $ mkcommit B1 created new head $ hg debugobsolete `getid 'desc(B0)'` `getid 'desc(B1)'` + 1 new obsolescence markers obsoleted 1 changesets $ hg prune -qd '0 0' . $ hg log -G --hidden
--- a/tests/test-exchange-obsmarkers-case-C2.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-exchange-obsmarkers-case-C2.t Mon Sep 09 17:26:17 2019 -0400 @@ -62,6 +62,7 @@ $ mkcommit A1 created new head $ hg debugobsolete `getid 'desc(A0)'` `getid 'desc(A1)'` + 1 new obsolescence markers obsoleted 1 changesets $ hg log -G --hidden @ e5ea8f9c7314 (draft): A1
--- a/tests/test-exchange-obsmarkers-case-C3.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-exchange-obsmarkers-case-C3.t Mon Sep 09 17:26:17 2019 -0400 @@ -64,6 +64,7 @@ $ mkcommit A1 created new head $ hg debugobsolete `getid 'desc(A0)'` `getid 'desc(A1)'` + 1 new obsolescence markers obsoleted 1 changesets $ hg prune -qd '0 0' . $ hg log -G --hidden
--- a/tests/test-exchange-obsmarkers-case-C4.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-exchange-obsmarkers-case-C4.t Mon Sep 09 17:26:17 2019 -0400 @@ -65,8 +65,10 @@ $ mkcommit C created new head $ hg debugobsolete --hidden `getid 'desc(A)'` `getid 'desc(B)'` + 1 new obsolescence markers obsoleted 1 changesets $ hg debugobsolete --hidden `getid 'desc(A)'` `getid 'desc(C)'` + 1 new obsolescence markers 2 new content-divergent changesets $ hg prune -qd '0 0' . $ hg log -G --hidden
--- a/tests/test-exchange-obsmarkers-case-D1.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-exchange-obsmarkers-case-D1.t Mon Sep 09 17:26:17 2019 -0400 @@ -61,9 +61,11 @@ $ mkcommit A1 created new head $ hg debugobsolete `getid 'desc(A0)'` `getid 'desc(A1)'` + 1 new obsolescence markers obsoleted 1 changesets 1 new orphan changesets $ hg prune -d '0 0' 'desc(B)' + 1 new obsolescence markers obsoleted 1 changesets $ hg strip --hidden -q 'desc(A0)' $ hg log -G --hidden
--- a/tests/test-exchange-obsmarkers-case-D2.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-exchange-obsmarkers-case-D2.t Mon Sep 09 17:26:17 2019 -0400 @@ -54,8 +54,10 @@ $ mkcommit A1 created new head $ hg debugobsolete `getid 'desc(A0)'` `getid 'desc(A1)'` + 1 new obsolescence markers obsoleted 1 changesets $ hg prune --date '0 0' . + 1 new obsolescence markers obsoleted 1 changesets $ hg strip --hidden -q 'desc(A1)' $ hg log -G --hidden
--- a/tests/test-exchange-obsmarkers-case-D3.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-exchange-obsmarkers-case-D3.t Mon Sep 09 17:26:17 2019 -0400 @@ -57,8 +57,10 @@ created new head $ mkcommit A1 $ hg debugobsolete `getid 'desc(A0)'` `getid 'desc(A1)'` + 1 new obsolescence markers obsoleted 1 changesets $ hg prune -d '0 0' . + 1 new obsolescence markers obsoleted 1 changesets $ hg strip --hidden -q 'desc(A1)' $ hg log -G --hidden
--- a/tests/test-exchange-obsmarkers-case-D4.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-exchange-obsmarkers-case-D4.t Mon Sep 09 17:26:17 2019 -0400 @@ -59,12 +59,16 @@ created new head $ mkcommit B1 $ hg debugobsolete `getid 'desc(A0)'` aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa + 1 new obsolescence markers obsoleted 1 changesets 1 new orphan changesets $ hg debugobsolete aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa `getid 'desc(A1)'` + 1 new obsolescence markers $ hg debugobsolete `getid 'desc(B0)'` bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb + 1 new obsolescence markers obsoleted 1 changesets $ hg debugobsolete bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb `getid 'desc(B1)'` + 1 new obsolescence markers $ hg log -G --hidden @ 069b05c3876d (draft): B1 |
--- a/tests/test-fix-metadata.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-fix-metadata.t Mon Sep 09 17:26:17 2019 -0400 @@ -43,6 +43,9 @@ > [extensions] > fix = > [fix] + > metadatafalse:command=cat $TESTTMP/missing + > metadatafalse:pattern=metadatafalse + > metadatafalse:metadata=false > missing:command=cat $TESTTMP/missing > missing:pattern=missing > missing:metadata=true @@ -65,6 +68,7 @@ $ hg init repo $ cd repo + $ printf "old content\n" > metadatafalse $ printf "old content\n" > invalid $ printf "old content\n" > missing $ printf "old content\n" > valid @@ -72,15 +76,20 @@ $ hg fix -w ignored invalid output from fixer tool: invalid + fixed metadatafalse in revision 2147483647 using metadatafalse ignored invalid output from fixer tool: missing fixed valid in revision 2147483647 using valid saw "key" 1 times fixed 1 files with valid fixed the working copy - $ cat missing invalid valid + $ cat metadatafalse + new content + $ cat missing old content + $ cat invalid old content + $ cat valid new content $ cd ..
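The metadatafalse, missing, and valid fixers above exercise the fix extension's metadata protocol. Assuming the framing implied by the test output (with :metadata=true the tool prints a JSON object, a NUL byte, then the fixed file content, and unparsable output is ignored, hence the "ignored invalid output" lines), a conforming fixer could be sketched as:

    import json
    import sys

    # Toy fixer for fix.<tool>:metadata=true: JSON header, NUL separator,
    # then the fixed content. The framing here is inferred from the test
    # output above, not copied from hgext/fix.py's parser.
    content = sys.stdin.buffer.read()
    header = json.dumps({'key': 'value'}).encode('utf-8')
    sys.stdout.buffer.write(header + b'\0' + content.upper())

The {'key': 'value'} header is what produces the "saw \"key\" 1 times" line in the test run above.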
--- a/tests/test-fix.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-fix.t Mon Sep 09 17:26:17 2019 -0400 @@ -147,6 +147,15 @@ {first} The 1-based line number of the first line in the modified range {last} The 1-based line number of the last line in the modified range + Deleted sections of a file will be ignored by :linerange, because there is no + corresponding line range in the version being fixed. + + By default, tools that set :linerange will only be executed if there is at + least one changed line range. This is meant to prevent accidents like running + a code formatter in such a way that it unexpectedly reformats the whole file. + If such a tool needs to operate on unchanged files, it should set the + :skipclean suboption to false. + The :pattern suboption determines which files will be passed through each configured tool. See 'hg help patterns' for possible values. If there are file arguments to 'hg fix', the intersection of these patterns is used. @@ -215,6 +224,13 @@ executions that modified a file. This aggregates the same metadata previously passed to the "postfixfile" hook. + Fixer tools are run in the repository's root directory. This allows them to + read configuration files from the working copy, or even write to the working + copy. The working copy is not updated to match the revision being fixed. In + fact, several revisions may be fixed in parallel. Writes to the working copy + are not amended into the revision being fixed; fixer tools should always write + fixed file content back to stdout as documented above. + list of commands: fix rewrite file content in changesets or working directory @@ -439,6 +455,18 @@ $ printf "a\nb\nc\nd\ne\nf\ng\n" > foo.changed $ hg commit -Aqm "foo" $ printf "zz\na\nc\ndd\nee\nff\nf\ngg\n" > foo.changed + + $ hg fix --working-dir + $ cat foo.changed + ZZ + a + c + DD + EE + FF + f + GG + $ hg fix --working-dir --whole $ cat foo.changed ZZ @@ -526,6 +554,21 @@ $ cd .. +If we try to fix a missing file, we still fix other files. + + $ hg init fixmissingfile + $ cd fixmissingfile + + $ printf "fix me!\n" > foo.whole + $ hg add + adding foo.whole + $ hg fix --working-dir foo.whole bar.whole + bar.whole: $ENOENT$ + $ cat *.whole + FIX ME! + + $ cd .. + Specifying a directory name should fix all its files and subdirectories. $ hg init fixdirectory @@ -1060,6 +1103,7 @@ $ printf "foo\n" > foo.changed $ hg commit -Aqm "foo" $ hg debugobsolete `hg parents --template '{node}'` + 1 new obsolescence markers obsoleted 1 changesets $ hg --hidden fix -r 0 abort: fixing obsolete revision could cause divergence @@ -1161,28 +1205,6 @@ $ cd .. -The :fileset subconfig was a misnomer, so we renamed it to :pattern. We will -still accept :fileset by itself as if it were :pattern, but this will issue a -warning. - - $ hg init filesetispattern - $ cd filesetispattern - - $ printf "foo\n" > foo.whole - $ printf "first\nsecond\n" > bar.txt - $ hg add -q - $ hg fix -w --config fix.sometool:fileset=bar.txt \ - > --config fix.sometool:command="sort -r" - the fix.tool:fileset config name is deprecated; please rename it to fix.tool:pattern - - $ cat foo.whole - FOO - $ cat bar.txt - second - first - - $ cd .. - The execution order of tools can be controlled. This example doesn't work if you sort after truncating, but the config defines the correct order while the definitions are out of order (which might imply the incorrect order given the @@ -1264,3 +1286,114 @@ $ cd ..
+We run fixer tools in the repo root so they can look for config files or other +important things in the working directory. This does NOT mean we are +reconstructing a working copy of every revision being fixed; we're just giving +the tool knowledge of the repo's location in case it can do something +reasonable with that. + + $ hg init subprocesscwd + $ cd subprocesscwd + + $ cat >> .hg/hgrc <<EOF + > [fix] + > printcwd:command = pwd + > printcwd:pattern = path:foo/bar + > EOF + + $ mkdir foo + $ printf "bar\n" > foo/bar + $ hg commit -Aqm blah + + $ hg fix -w -r . foo/bar + $ hg cat -r tip foo/bar + $TESTTMP/subprocesscwd + $ cat foo/bar + $TESTTMP/subprocesscwd + + $ cd foo + + $ hg fix -w -r . bar + $ hg cat -r tip bar + $TESTTMP/subprocesscwd + $ cat bar + $TESTTMP/subprocesscwd + + $ cd ../.. + +Tools configured without a pattern are ignored. It would be too dangerous to +run them on all files, because this might happen while testing a configuration +that also deletes all of the file content. There is no reasonable subset of the +files to use as a default. Users should be explicit about what files are +affected by a tool. This test also confirms that we don't crash when the +pattern config is missing, and that we only warn about it once. + + $ hg init nopatternconfigured + $ cd nopatternconfigured + + $ printf "foo" > foo + $ printf "bar" > bar + $ hg add -q + $ hg fix --debug --working-dir --config "fix.nopattern:command=echo fixed" + fixer tool has no pattern configuration: nopattern + $ cat foo bar + foobar (no-eol) + + $ cd .. + +Test that we can configure a fixer to affect all files regardless of the cwd. +The way we invoke matching must not prohibit this. + + $ hg init affectallfiles + $ cd affectallfiles + + $ mkdir foo bar + $ printf "foo" > foo/file + $ printf "bar" > bar/file + $ printf "baz" > baz_file + $ hg add -q + + $ cd bar + $ hg fix --working-dir --config "fix.cooltool:command=echo fixed" \ + > --config "fix.cooltool:pattern=rootglob:**" + $ cd .. + + $ cat foo/file + fixed + $ cat bar/file + fixed + $ cat baz_file + fixed + + $ cd .. + +Tools should be able to run on unchanged files, even if they set :linerange. +This includes a corner case where deleted chunks of a file are not considered +changes. + + $ hg init skipclean + $ cd skipclean + + $ printf "a\nb\nc\n" > foo + $ printf "a\nb\nc\n" > bar + $ printf "a\nb\nc\n" > baz + $ hg commit -Aqm "base" + + $ printf "a\nc\n" > foo + $ printf "a\nx\nc\n" > baz + + $ hg fix --working-dir foo bar baz \ + > --config 'fix.changedlines:command=printf "Line ranges:\n"; ' \ + > --config 'fix.changedlines:linerange=printf "{first} through {last}\n"; ' \ + > --config 'fix.changedlines:pattern=rootglob:**' \ + > --config 'fix.changedlines:skipclean=false' + + $ cat foo + Line ranges: + $ cat bar + Line ranges: + $ cat baz + Line ranges: + 2 through 2 + + $ cd ..
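The help text added above says :linerange tools run once per changed range, that deleted-only chunks yield no range, and that clean files are skipped entirely unless :skipclean=false. A standalone illustration of that range computation (difflib-based; it only models the documented behavior and is not fix.py's implementation):

    import difflib

    def changed_ranges(old_lines, new_lines):
        """Return 1-based inclusive (first, last) ranges of changed lines in
        new_lines -- the values a :linerange template would receive."""
        ranges = []
        matcher = difflib.SequenceMatcher(None, old_lines, new_lines)
        for op, i1, i2, j1, j2 in matcher.get_opcodes():
            # Pure deletions leave no lines in the fixed version, so they
            # produce no range, matching the documented :linerange behavior.
            if op in ('replace', 'insert') and j2 > j1:
                ranges.append((j1 + 1, j2))
        return ranges

    print(changed_ranges(['a\n', 'b\n', 'c\n'], ['a\n', 'x\n', 'c\n']))  # [(2, 2)]
    print(changed_ranges(['a\n', 'b\n', 'c\n'], ['a\n', 'c\n']))         # []
    # An empty list is the "clean, or deletions only" case: with the default
    # :skipclean=true the tool would not run at all.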
--- a/tests/test-flagprocessor.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-flagprocessor.t Mon Sep 09 17:26:17 2019 -0400 @@ -205,9 +205,9 @@ extsetup(ui) File "*/tests/flagprocessorext.py", line *, in extsetup (glob) validatehash, - File "*/mercurial/revlog.py", line *, in addflagprocessor (glob) - _insertflagprocessor(flag, processor, _flagprocessors) - File "*/mercurial/revlog.py", line *, in _insertflagprocessor (glob) + File "*/mercurial/revlogutils/flagutil.py", line *, in addflagprocessor (glob) + insertflagprocessor(flag, processor, flagprocessors) + File "*/mercurial/revlogutils/flagutil.py", line *, in insertflagprocessor (glob) raise error.Abort(msg) mercurial.error.Abort: b"cannot register multiple processors on flag '0x8'." (py3 !) Abort: cannot register multiple processors on flag '0x8'. (no-py3 !)
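The updated traceback reflects the flag-processor registry moving from mercurial/revlog.py to mercurial/revlogutils/flagutil.py; the duplicate-registration guard itself is unchanged. A standalone toy of that guard (names mirror the traceback, but this is not the real module):

    flagprocessors = {}

    def insertflagprocessor(flag, processor, flagprocessors):
        if flag in flagprocessors:
            raise ValueError(
                "cannot register multiple processors on flag '%#x'." % flag)
        flagprocessors[flag] = processor

    # A processor is registered per revlog flag bit; registering the same
    # bit twice aborts, which is what the extension setup above trips over.
    insertflagprocessor(0x8, ('read', 'write', 'raw'), flagprocessors)
    try:
        insertflagprocessor(0x8, ('read', 'write', 'raw'), flagprocessors)
    except ValueError as exc:
        print(exc)  # cannot register multiple processors on flag '0x8'.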
--- a/tests/test-glog-beautifygraph.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-glog-beautifygraph.t Mon Sep 09 17:26:17 2019 -0400 @@ -2426,6 +2426,7 @@ > EOF $ hg debugobsolete `hg id --debug -i -r 8` + 1 new obsolescence markers obsoleted 1 changesets $ testlog []
--- a/tests/test-glog.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-glog.t Mon Sep 09 17:26:17 2019 -0400 @@ -2276,6 +2276,7 @@ > EOF $ hg debugobsolete `hg id --debug -i -r 8` + 1 new obsolescence markers obsoleted 1 changesets $ testlog []
--- a/tests/test-hgignore.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-hgignore.t Mon Sep 09 17:26:17 2019 -0400 @@ -176,6 +176,8 @@ ? .hgignore ? a.c ? syntax + $ hg debugignore + <includematcher includes='.*\\.o(?:/|$)'> $ cd .. $ echo > .hg/testhgignorerel @@ -222,7 +224,7 @@ A b.o $ hg debugignore - <includematcher includes='(?:|.*/)[^/]*(?:/|$)'> + <includematcher includes='.*(?:/|$)'> $ hg debugignore b.o b.o is ignored
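The debugignore output changes because the glob-to-regex translation for * was simplified from (?:|.*/)[^/]*(?:/|$) to .*(?:/|$). A quick standalone spot-check (not how the test suite verifies this) that the two patterns accept the same sample paths:

    import re

    old = re.compile(r'(?:|.*/)[^/]*(?:/|$)')
    new = re.compile(r'.*(?:/|$)')

    for path in ['b.o', 'dir/b.o', 'dir/sub/', 'a.c', 'syntax']:
        assert bool(old.match(path)) == bool(new.match(path)), path
    print('patterns agree on the sample paths')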
--- a/tests/test-hook.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-hook.t Mon Sep 09 17:26:17 2019 -0400 @@ -720,7 +720,6 @@ adding changesets adding manifests adding file changes - added 1 changesets with 1 changes to 1 files 4:539e4b31b6dc pretxnchangegroup.forbid hook: HG_HOOKNAME=pretxnchangegroup.forbid1 HG_HOOKTYPE=pretxnchangegroup @@ -763,8 +762,8 @@ adding changesets adding manifests adding file changes + adding remote bookmark quux added 1 changesets with 1 changes to 1 files - adding remote bookmark quux new changesets 539e4b31b6dc (run 'hg update' to get a working copy) $ hg rollback @@ -995,8 +994,8 @@ adding changesets adding manifests adding file changes + adding remote bookmark quux added 1 changesets with 1 changes to 1 files - adding remote bookmark quux new changesets 539e4b31b6dc (run 'hg update' to get a working copy) @@ -1235,13 +1234,13 @@ adding changesets adding manifests adding file changes - added 1 changesets with 1 changes to 1 files changeset: 1:9836a07b9b9d tag: tip user: test date: Thu Jan 01 00:00:00 1970 +0000 summary: b + added 1 changesets with 1 changes to 1 files pretxnclose hook failure should abort the transaction
--- a/tests/test-http-bad-server.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-http-bad-server.t Mon Sep 09 17:26:17 2019 -0400 @@ -1092,7 +1092,6 @@ adding changesets adding manifests adding file changes - added 1 changesets with 1 changes to 1 files transaction abort! rollback completed abort: HTTP request error (incomplete response) (py3 !)
--- a/tests/test-http.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-http.t Mon Sep 09 17:26:17 2019 -0400 @@ -338,12 +338,13 @@ bundle2-input-bundle: no-transaction bundle2-input-part: "reply:changegroup" (advisory) (params: 0 advisory) supported bundle2-input-part: "output" (advisory) (params: 0 advisory) supported - bundle2-input-part: total payload size 100 + bundle2-input-part: total payload size 55 remote: adding changesets remote: adding manifests remote: adding file changes + bundle2-input-part: "output" (advisory) supported + bundle2-input-part: total payload size 45 remote: added 1 changesets with 1 changes to 1 files - bundle2-input-part: "output" (advisory) supported bundle2-input-bundle: 2 parts total preparing listkeys for "phases" sending listkeys command
--- a/tests/test-infinitepush-bundlestore.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-infinitepush-bundlestore.t Mon Sep 09 17:26:17 2019 -0400 @@ -168,8 +168,8 @@ adding changesets adding manifests adding file changes + adding remote bookmark newbook added 1 changesets with 1 changes to 2 files - adding remote bookmark newbook new changesets 1de1d7d92f89 (1 drafts) (run 'hg update' to get a working copy) $ hg log -G -T '{desc} {phase} {bookmarks}'
--- a/tests/test-infinitepush.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-infinitepush.t Mon Sep 09 17:26:17 2019 -0400 @@ -78,11 +78,10 @@ adding changesets adding manifests adding file changes - added 1 changesets with 1 changes to 1 files adding changesets adding manifests adding file changes - added 1 changesets with 1 changes to 1 files (+1 heads) + added 2 changesets with 2 changes to 2 files (+1 heads) new changesets * (glob) (run 'hg heads' to see heads, 'hg merge' to merge) $ hg log -r scratch/secondpart -T '{node}' @@ -158,11 +157,10 @@ adding changesets adding manifests adding file changes - added 1 changesets with 1 changes to 1 files (+1 heads) adding changesets adding manifests adding file changes - added 1 changesets with 1 changes to 1 files + added 2 changesets with 2 changes to 2 files (+1 heads) new changesets a79b6597f322:c70aee6da07d (1 drafts) (run 'hg heads .' to see heads, 'hg merge' to merge) $ hg log -r scratch/scratchontopofpublic -T '{phase}'
--- a/tests/test-install.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-install.t Mon Sep 09 17:26:17 2019 -0400 @@ -153,6 +153,16 @@ 1 problems detected, please check your install! [1] +debuginstall extension support + $ hg debuginstall --config extensions.fsmonitor= --config fsmonitor.watchman_exe=false | grep atchman + fsmonitor checking for watchman binary... (false) + watchman binary missing or broken: warning: Watchman unavailable: watchman exited with code 1 +Verify the json works too: + $ hg debuginstall --config extensions.fsmonitor= --config fsmonitor.watchman_exe=false -Tjson | grep atchman + "fsmonitor-watchman": "false", + "fsmonitor-watchman-error": "warning: Watchman unavailable: watchman exited with code 1", + + #if test-repo $ . "$TESTDIR/helpers-testrepo.sh"
--- a/tests/test-largefiles.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-largefiles.t Mon Sep 09 17:26:17 2019 -0400 @@ -1115,7 +1115,7 @@ $ hg pull -v --lfrev 'heads(pulled())+min(pulled())' pulling from $TESTTMP/a searching for changes - all local heads known remotely + all local changesets known remotely 6 changesets found uncompressed size of bundle content: 1389 (changelog)
--- a/tests/test-lfconvert.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-lfconvert.t Mon Sep 09 17:26:17 2019 -0400 @@ -332,6 +332,7 @@ > evolution.createmarkers=True > EOF $ hg debugobsolete `hg log -r tip -T "{node}"` + 1 new obsolescence markers obsoleted 1 changesets $ cd ..
--- a/tests/test-lfs-serve-access.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-lfs-serve-access.t Mon Sep 09 17:26:17 2019 -0400 @@ -105,7 +105,6 @@ adding manifests adding file changes adding lfs.bin revisions - added 1 changesets with 1 changes to 1 files bundle2-input-part: total payload size 648 bundle2-input-part: "listkeys" (params: 1 mandatory) supported bundle2-input-part: "phase-heads" supported @@ -115,6 +114,7 @@ bundle2-input-bundle: 3 parts total checking for updated bookmarks updating the branch cache + added 1 changesets with 1 changes to 1 files new changesets 525251863cad updating to branch default resolving manifests
--- a/tests/test-lfs-serve.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-lfs-serve.t Mon Sep 09 17:26:17 2019 -0400 @@ -499,8 +499,8 @@ adding changesets adding manifests adding file changes + calling hook pretxnchangegroup.lfs: hgext.lfs.checkrequireslfs added 6 changesets with 5 changes to 5 files (+1 heads) - calling hook pretxnchangegroup.lfs: hgext.lfs.checkrequireslfs new changesets d437e1d24fbd:d3b84d50eacb resolving manifests lfs: assuming remote store: http://localhost:$HGPORT/.git/info/lfs
--- a/tests/test-lfs-test-server.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-lfs-test-server.t Mon Sep 09 17:26:17 2019 -0400 @@ -135,13 +135,13 @@ adding manifests adding file changes adding a revisions - added 1 changesets with 1 changes to 1 files calling hook pretxnchangegroup.lfs: hgext.lfs.checkrequireslfs bundle2-input-part: total payload size 617 bundle2-input-part: "phase-heads" supported bundle2-input-part: total payload size 24 bundle2-input-bundle: 3 parts total updating the branch cache + added 1 changesets with 1 changes to 1 files bundle2-output-bundle: "HG20", 1 parts total bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload bundle2-input-bundle: no-transaction @@ -312,12 +312,12 @@ adding b revisions adding c revisions adding d revisions - added 1 changesets with 3 changes to 3 files bundle2-input-part: total payload size 1315 bundle2-input-part: "phase-heads" supported bundle2-input-part: total payload size 24 bundle2-input-bundle: 4 parts total updating the branch cache + added 1 changesets with 3 changes to 3 files bundle2-output-bundle: "HG20", 1 parts total bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload bundle2-input-bundle: no-transaction
--- a/tests/test-lfs.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-lfs.t Mon Sep 09 17:26:17 2019 -0400 @@ -124,8 +124,8 @@ adding changesets adding manifests adding file changes + calling hook pretxnchangegroup.lfs: hgext.lfs.checkrequireslfs added 2 changesets with 3 changes to 3 files - calling hook pretxnchangegroup.lfs: hgext.lfs.checkrequireslfs $ grep lfs $TESTTMP/server/.hg/requires lfs @@ -701,7 +701,7 @@ > if len(fl) == 0: > continue > sizes = [fl._revlog.rawsize(i) for i in fl] - > texts = [fl.revision(i, raw=True) for i in fl] + > texts = [fl.rawdata(i) for i in fl] > flags = [int(fl._revlog.flags(i)) for i in fl] > hashes = [hash(t) for t in texts] > pycompat.stdout.write(b' %s: rawsizes=%r flags=%r hashes=%s\n'
--- a/tests/test-log.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-log.t Mon Sep 09 17:26:17 2019 -0400 @@ -1941,6 +1941,7 @@ 1:a765632148dc55d38c35c4f247c618701886cb2f 0:9f758d63dcde62d547ebfb08e1e7ee96535f2b05 $ hg debugobsolete a765632148dc55d38c35c4f247c618701886cb2f + 1 new obsolescence markers obsoleted 1 changesets $ hg up null -q $ hg log --template='{rev}:{node}\n' @@ -1995,6 +1996,7 @@ $ hg bookmark -d X@foo $ hg up null -q $ hg debugobsolete 9f758d63dcde62d547ebfb08e1e7ee96535f2b05 + 1 new obsolescence markers obsoleted 1 changesets $ echo f > b $ hg ci -Am'b' -d '2 0' @@ -2470,6 +2472,7 @@ $ hg log -T '{node}\n' -r 1 2294ae80ad8447bc78383182eeac50cb049df623 $ hg debugobsolete 2294ae80ad8447bc78383182eeac50cb049df623 + 1 new obsolescence markers obsoleted 1 changesets $ hg log -G o changeset: 4:50b9b36e9c5d @@ -2520,6 +2523,7 @@ $ hg log -T '{node}\n' -r 4 50b9b36e9c5df2c6fc6dcefa8ad0da929e84aed2 $ hg debugobsolete 50b9b36e9c5df2c6fc6dcefa8ad0da929e84aed2 + 1 new obsolescence markers obsoleted 1 changesets $ hg log -G a @ changeset: 3:15b2327059e5
--- a/tests/test-logexchange.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-logexchange.t Mon Sep 09 17:26:17 2019 -0400 @@ -98,9 +98,9 @@ adding changesets adding manifests adding file changes - added 9 changesets with 9 changes to 9 files (+1 heads) adding remote bookmark bar adding remote bookmark foo + added 9 changesets with 9 changes to 9 files (+1 heads) new changesets 18d04c59bb5d:3e1487808078 (run 'hg heads' to see heads)
--- a/tests/test-narrow-exchange.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-narrow-exchange.t Mon Sep 09 17:26:17 2019 -0400 @@ -217,7 +217,7 @@ remote: adding changesets remote: adding manifests remote: adding file changes - remote: added 1 changesets with 0 changes to 0 files + remote: added 1 changesets with 0 changes to 0 files (no-lfs-on !) remote: error: pretxnchangegroup.lfs hook raised an exception: data/inside2/f.i@f59b4e021835: no match found (lfs-on !) remote: transaction abort! (lfs-on !) remote: rollback completed (lfs-on !)
--- a/tests/test-narrow-trackedcmd.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-narrow-trackedcmd.t Mon Sep 09 17:26:17 2019 -0400 @@ -220,5 +220,5 @@ $ hg init non-narrow $ cd non-narrow $ hg tracked --addinclude foobar - abort: the tracked command is only supported on respositories cloned with --narrow + abort: the tracked command is only supported on repositories cloned with --narrow [255]
--- a/tests/test-narrow-widen-no-ellipsis.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-narrow-widen-no-ellipsis.t Mon Sep 09 17:26:17 2019 -0400 @@ -116,7 +116,7 @@ query 1; heads sending batch command searching for changes - all local heads known remotely + all local changesets known remotely sending narrow_widen command bundle2-input-bundle: with-transaction bundle2-input-part: "changegroup" (params: * mandatory) supported (glob) @@ -125,9 +125,9 @@ adding widest/ revisions (tree !) adding file changes adding widest/f revisions - added 0 changesets with 1 changes to 1 files bundle2-input-part: total payload size * (glob) bundle2-input-bundle: 0 parts total + added 0 changesets with 1 changes to 1 files widest/f: narrowspec updated -> g getting widest/f $ hg tracked
--- a/tests/test-narrow-widen.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-narrow-widen.t Mon Sep 09 17:26:17 2019 -0400 @@ -340,7 +340,6 @@ adding changesets adding manifests adding file changes - added 3 changesets with 2 changes to 2 files transaction abort! rollback completed abort: pretxnchangegroup.bad hook exited with status 1
--- a/tests/test-narrow.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-narrow.t Mon Sep 09 17:26:17 2019 -0400 @@ -157,6 +157,7 @@ $ hg co '.^' 1 files updated, 0 files merged, 0 files removed, 0 files unresolved $ hg debugobsolete `hg log -T '{node}' -r 'desc("local change to d0")'` + 1 new obsolescence markers obsoleted 1 changesets $ hg tracked --removeinclude d0 comparing with ssh://user@dummy/master
--- a/tests/test-notify.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-notify.t Mon Sep 09 17:26:17 2019 -0400 @@ -99,7 +99,13 @@ "/long/path/repository" into "repository". Default: 0. notify.domain - Default email domain for sender or recipients with no explicit domain. + Default email domain for sender or recipients with no explicit domain. It is + also used for the domain part of the "Message-Id" when using + "notify.messageidseed". + + notify.messageidseed + Create deterministic "Message-Id" headers for the mails based on the seed + and the revision identifier of the first commit in the changeset. notify.style Style file to use when formatting emails. @@ -190,7 +196,7 @@ of the very long subject line pull (minimal config) - $ hg --traceback --cwd b pull ../a | "$PYTHON" $TESTTMP/filter.py + $ hg --traceback --cwd b --config notify.domain=example.com --config notify.messageidseed=example pull ../a | "$PYTHON" $TESTTMP/filter.py pulling from ../a searching for changes adding changesets @@ -203,10 +209,10 @@ Content-Transfer-Encoding: 7bit Date: * (glob) Subject: changeset in $TESTTMP/b: b - From: test + From: test@example.com X-Hg-Notification: changeset 00a13f371396 - Message-Id: <*> (glob) - To: baz, foo@bar + Message-Id: <hg.ba3098a36bd4c297288d16788623a841f81f618ea961a0f0fd65de7eb1191b66@example.com> + To: baz@example.com, foo@bar changeset 00a13f371396 in $TESTTMP/b details: $TESTTMP/b?cmd=changeset;node=00a13f371396
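The test can pin the Message-Id because notify.messageidseed makes it a pure function of the changeset id, the seed, and notify.domain. A hedged sketch of such a scheme (the sha512 choice and 64-character truncation are assumptions inferred from the 64-hex-digit id above, not verified against hgext/notify.py):

    import hashlib

    def messageid(node_hex, messageidseed, domain):
        digest = hashlib.sha512((node_hex + messageidseed).encode('ascii'))
        return '<hg.%s@%s>' % (digest.hexdigest()[:64], domain)

    # Hypothetical full node id; the test uses short id 00a13f371396 with
    # seed "example" and domain "example.com".
    print(messageid('00a13f371396' + '0' * 28, 'example', 'example.com'))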
--- a/tests/test-obshistory.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-obshistory.t Mon Sep 09 17:26:17 2019 -0400 @@ -88,6 +88,7 @@ summary: ROOT $ hg debugobsolete --record-parents `getid 'desc(B0)'` + 1 new obsolescence markers obsoleted 1 changesets $ hg log --hidden -G @@ -162,6 +163,7 @@ adding b $ hg debugobsolete `getid '1'` `getid '2'` `getid '3'` + 1 new obsolescence markers obsoleted 1 changesets $ hg log --hidden -G @@ -251,6 +253,7 @@ adding d $ hg debugobsolete `getid '1'` `getid '2'` `getid '3'` `getid '4'` `getid '5'` + 1 new obsolescence markers obsoleted 1 changesets $ hg log --hidden -G @@ -336,9 +339,11 @@ created new head $ hg debugobsolete `getid 'desc(A0)'` `getid 'desc(C0)'` + 1 new obsolescence markers obsoleted 1 changesets 1 new orphan changesets $ hg debugobsolete `getid 'desc(B0)'` `getid 'desc(C0)'` + 1 new obsolescence markers obsoleted 1 changesets $ hg log --hidden -G @@ -507,9 +512,11 @@ created new head $ hg debugobsolete `getid 'desc(A0)'` `getid 'desc(C0)'` + 1 new obsolescence markers obsoleted 1 changesets 1 new orphan changesets $ hg debugobsolete `getid 'desc(B1)'` `getid 'desc(C0)'` + 1 new obsolescence markers obsoleted 1 changesets $ hg log --hidden -G
--- a/tests/test-obsmarker-template.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-obsmarker-template.t Mon Sep 09 17:26:17 2019 -0400 @@ -387,6 +387,7 @@ $ hg commit -A -m "A0" adding b $ hg debugobsolete `getid "1"` `getid "2"` `getid "3"` + 1 new obsolescence markers obsoleted 1 changesets $ hg log --hidden -G @@ -593,9 +594,11 @@ adding B0 created new head $ hg debugobsolete `getid "desc(A0)"` `getid "desc(C0)"` + 1 new obsolescence markers obsoleted 1 changesets 1 new orphan changesets $ hg debugobsolete `getid "desc(B0)"` `getid "desc(C0)"` + 1 new obsolescence markers obsoleted 1 changesets $ hg log --hidden -G @@ -1120,9 +1123,11 @@ adding B0 created new head $ hg debugobsolete `getid "desc(A0)"` `getid "desc(C0)"` + 1 new obsolescence markers obsoleted 1 changesets 1 new orphan changesets $ hg debugobsolete `getid "desc(B1)"` `getid "desc(C0)"` + 1 new obsolescence markers obsoleted 1 changesets $ hg log --hidden -G @@ -1601,11 +1606,14 @@ Create the cycle $ hg debugobsolete `getid "desc(A0)"` `getid "desc(B0)"` + 1 new obsolescence markers obsoleted 1 changesets 1 new orphan changesets $ hg debugobsolete `getid "desc(B0)"` `getid "desc(C0)"` + 1 new obsolescence markers obsoleted 1 changesets $ hg debugobsolete `getid "desc(B0)"` `getid "desc(A0)"` + 1 new obsolescence markers Check templates --------------- @@ -1854,6 +1862,7 @@ summary: ROOT $ hg debugobsolete `getid "4"` `getid "5"` `getid "6"` `getid "7"` + 1 new obsolescence markers obsoleted 1 changesets $ hg log -G @ changeset: 7:ba2ed02b0c9a @@ -2301,6 +2310,7 @@ $ mkcommit ROOT $ mkcommit A0 $ hg debugobsolete --record-parent `getid "."` + 1 new obsolescence markers obsoleted 1 changesets Check output @@ -2330,6 +2340,7 @@ $ mkcommit A0 $ hg commit --amend -m "A1" $ hg debugobsolete --record-parent `getid "."` + 1 new obsolescence markers obsoleted 1 changesets $ hg up -r "desc(A0)" --hidden @@ -2338,6 +2349,7 @@ (hidden revision '471f378eab4c' is pruned) $ hg commit --amend -m "A2" $ hg debugobsolete --record-parent `getid "."` + 1 new obsolescence markers obsoleted 1 changesets Check output @@ -2481,10 +2493,12 @@ $ hg commit -A -m "A2" adding b $ hg debugobsolete `getid "1"` `getid "2"` `getid "3"` + 1 new obsolescence markers obsoleted 1 changesets # Simulate prune $ hg debugobsolete --record-parent `getid "."` + 1 new obsolescence markers obsoleted 1 changesets $ hg log --hidden -G @@ -2622,6 +2636,7 @@ > --config devel.user.obsmarker="`cat test2`" $ mkcommit B0 $ HGENCODING=latin-1 hg debugobsolete -u "`cat test2`" "`getid 'desc(B0)'`" + 1 new obsolescence markers obsoleted 1 changesets metadata should be stored in UTF-8, and debugobsolete doesn't decode it to
--- a/tests/test-obsolete-bundle-strip.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-obsolete-bundle-strip.t Mon Sep 09 17:26:17 2019 -0400 @@ -126,9 +126,12 @@ $ mkcommit 'C-A1' created new head $ hg debugobsolete a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0 `getid 'desc("C-A0")'` + 1 new obsolescence markers $ hg debugobsolete `getid 'desc("C-A0")'` a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1 + 1 new obsolescence markers obsoleted 1 changesets $ hg debugobsolete a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1 `getid 'desc("C-A1")'` + 1 new obsolescence markers $ hg up 'desc("ROOT")' 0 files updated, 0 files merged, 1 files removed, 0 files unresolved @@ -272,10 +275,13 @@ $ mkcommit 'C-A1' created new head $ hg debugobsolete a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0 `getid 'desc("C-A0")'` + 1 new obsolescence markers $ hg debugobsolete `getid 'desc("C-A0")'` `getid 'desc("C-A1")'` + 1 new obsolescence markers obsoleted 1 changesets 1 new orphan changesets $ hg debugobsolete --record-parents `getid 'desc("C-B0")'` + 1 new obsolescence markers obsoleted 1 changesets $ hg up 'desc("ROOT")' 0 files updated, 0 files merged, 1 files removed, 0 files unresolved @@ -461,9 +467,12 @@ $ mkcommit 'C-A1' created new head $ hg debugobsolete a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0 `getid 'desc("C-A0")'` + 1 new obsolescence markers $ hg debugobsolete --record-parents `getid 'desc("C-A0")'` + 1 new obsolescence markers obsoleted 1 changesets $ hg debugobsolete `getid 'desc("C-A0")'` `getid 'desc("C-A1")'` + 1 new obsolescence markers $ hg up 'desc("ROOT")' 0 files updated, 0 files merged, 1 files removed, 0 files unresolved $ hg log --hidden -G @@ -606,10 +615,13 @@ $ mkcommit 'C-A1' created new head $ hg debugobsolete a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0 `getid 'desc("C-A0")'` + 1 new obsolescence markers $ hg debugobsolete `getid 'desc("C-A0")'` `getid 'desc("C-A1")'` + 1 new obsolescence markers obsoleted 1 changesets 1 new orphan changesets $ hg debugobsolete --record-parents `getid 'desc("C-B0")'` + 1 new obsolescence markers obsoleted 1 changesets (it is annoying to create prune with parent data without the changeset, so we strip it after the fact) @@ -688,9 +700,12 @@ $ mkcommit 'C-A1' created new head $ hg debugobsolete a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0 `getid 'desc("C-A0")'` + 1 new obsolescence markers $ hg debugobsolete --record-parents `getid 'desc("C-A0")'` + 1 new obsolescence markers obsoleted 1 changesets $ hg debugobsolete `getid 'desc("C-A0")'` `getid 'desc("C-A1")'` + 1 new obsolescence markers (it is annoying to create prune with parent data without the changeset, so we strip it after the fact) @@ -775,20 +790,29 @@ $ mkcommit 'C-E' created new head $ hg debugobsolete a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0 `getid 'desc("C-A")'` + 1 new obsolescence markers $ hg debugobsolete `getid 'desc("C-A")'` `getid 'desc("C-B")'` `getid 'desc("C-C")'` # record split + 1 new obsolescence markers obsoleted 1 changesets $ hg debugobsolete `getid 'desc("C-A")'` `getid 'desc("C-D")'` # other divergent + 1 new obsolescence markers 3 new content-divergent changesets $ hg debugobsolete `getid 'desc("C-A")'` b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0 + 1 new obsolescence markers $ hg debugobsolete b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0 `getid 'desc("C-E")'` + 1 new obsolescence markers 1 new content-divergent changesets $ hg debugobsolete `getid 'desc("C-B")'` `getid 'desc("C-E")'` + 1 new obsolescence markers obsoleted 1 changesets $ hg debugobsolete `getid 'desc("C-C")'` `getid 'desc("C-E")'` + 1 
new obsolescence markers obsoleted 1 changesets $ hg debugobsolete `getid 'desc("C-D")'` `getid 'desc("C-E")'` + 1 new obsolescence markers obsoleted 1 changesets $ hg debugobsolete c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0 `getid 'desc("C-E")'` + 1 new obsolescence markers $ hg up 'desc("ROOT")' 0 files updated, 0 files merged, 1 files removed, 0 files unresolved @@ -1334,6 +1358,7 @@ $ mkcommit 'C-A' $ mkcommit 'C-B' $ hg debugobsolete --record-parent `getid 'desc("C-B")'` + 1 new obsolescence markers obsoleted 1 changesets $ hg up 'desc("ROOT")'
--- a/tests/test-obsolete-changeset-exchange.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-obsolete-changeset-exchange.t Mon Sep 09 17:26:17 2019 -0400 @@ -34,6 +34,7 @@ o base d20a80d4def38df63a4b330b7fb688f3d4cae1e3 $ hg debugobsolete 9d73aac1b2ed7d53835eaeec212ed41ea47da53a f89bcc95eba5174b1ccc3e33a82e84c96e8338ee + 1 new obsolescence markers obsoleted 1 changesets Push it. The bundle should not refer to the extinct changeset. @@ -139,6 +140,7 @@ $ echo 2b > foo $ hg -q commit -m 2b $ hg debugobsolete 6a29ed9c68defff1a139e5c6fa9696fb1a75783d bec0734cd68e84477ba7fc1d13e6cff53ab70129 + 1 new obsolescence markers obsoleted 1 changesets $ cd .. @@ -168,7 +170,6 @@ adding manifests adding file changes adding foo revisions - added 1 changesets with 1 changes to 1 files (+1 heads) bundle2-input-part: total payload size 476 bundle2-input-part: "listkeys" (params: 1 mandatory) supported bundle2-input-part: "phase-heads" supported @@ -178,5 +179,6 @@ bundle2-input-bundle: 3 parts total checking for updated bookmarks updating the branch cache + added 1 changesets with 1 changes to 1 files (+1 heads) new changesets bec0734cd68e (run 'hg heads' to see heads, 'hg merge' to merge)
--- a/tests/test-obsolete-checkheads.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-obsolete-checkheads.t Mon Sep 09 17:26:17 2019 -0400 @@ -47,6 +47,7 @@ $ mkcommit new created new head $ hg debugobsolete --flags 1 `getid old` `getid new` + 1 new obsolescence markers obsoleted 1 changesets $ hg log -G --hidden @ 71e3228bffe1 (draft) add new @@ -190,6 +191,7 @@ $ mkcommit desc2 created new head $ hg debugobsolete `getid old` `getid new` + 1 new obsolescence markers obsoleted 1 changesets $ hg log -G --hidden @ 5fe37041cc2b (draft) add desc2 @@ -247,6 +249,7 @@ $ hg id --debug -r tip 71e3228bffe1886550777233d6c97bb5a6b2a650 tip $ hg debugobsolete c70b08862e0838ea6d7c59c85da2f1ed6c8d67da 71e3228bffe1886550777233d6c97bb5a6b2a650 + 1 new obsolescence markers $ hg log -G --hidden @ 71e3228bffe1 (draft) add new | @@ -301,6 +304,7 @@ $ mkcommit new-unrelated created new head $ hg debugobsolete `getid old` + 1 new obsolescence markers obsoleted 1 changesets $ hg log -G --hidden @ 350a93b716be (draft) add new-unrelated
--- a/tests/test-obsolete-distributed.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-obsolete-distributed.t Mon Sep 09 17:26:17 2019 -0400 @@ -50,6 +50,7 @@ $ mkcommit c_B0 created new head $ hg debugobsolete `getid 'desc("c_A0")'` `getid 'desc("c_A1")'` + 1 new obsolescence markers obsoleted 1 changesets $ hg log -G --hidden -v @ 3:e5d7dda7cd28 c_B0 @@ -82,6 +83,7 @@ $ mkcommit c_B1 created new head $ hg debugobsolete `getid 'desc("c_B0")'` `getid 'desc("c_B1")'` + 1 new obsolescence markers obsoleted 1 changesets $ hg log -G @ 4:391a2bf12b1b c_B1 @@ -111,6 +113,7 @@ $ cd client $ hg debugobsolete `getid 'desc("c_A1")'` `getid 'desc("c_B0")'` + 1 new obsolescence markers obsoleted 1 changesets $ hg log -G @ 3:e5d7dda7cd28 c_B0 @@ -537,6 +540,7 @@ $ cd repo-a $ hg debugbuilddag .. $ hg debugobsolete `getid tip` + 1 new obsolescence markers obsoleted 1 changesets $ cd ../ $ hg clone --pull repo-a repo-b
--- a/tests/test-obsolete-divergent.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-obsolete-divergent.t Mon Sep 09 17:26:17 2019 -0400 @@ -64,8 +64,10 @@ $ newcase direct $ hg debugobsolete `getid A_0` `getid A_1` + 1 new obsolescence markers obsoleted 1 changesets $ hg debugobsolete `getid A_0` `getid A_2` + 1 new obsolescence markers 2 new content-divergent changesets $ hg log -G --hidden * 3:392fd25390da A_2 @@ -124,12 +126,15 @@ $ newcase indirect_known $ hg debugobsolete `getid A_0` `getid A_1` + 1 new obsolescence markers obsoleted 1 changesets $ hg debugobsolete `getid A_0` `getid A_2` + 1 new obsolescence markers 2 new content-divergent changesets $ mkcommit A_3 created new head $ hg debugobsolete `getid A_2` `getid A_3` + 1 new obsolescence markers obsoleted 1 changesets $ hg log -G --hidden @ 4:01f36c5a8fda A_3 @@ -184,9 +189,12 @@ $ newcase indirect_unknown $ hg debugobsolete `getid A_0` aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa + 1 new obsolescence markers obsoleted 1 changesets $ hg debugobsolete aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa `getid A_1` + 1 new obsolescence markers $ hg debugobsolete `getid A_0` `getid A_2` + 1 new obsolescence markers 2 new content-divergent changesets $ hg log -G --hidden * 3:392fd25390da A_2 @@ -234,12 +242,17 @@ $ newcase final-unknown $ hg debugobsolete `getid A_0` `getid A_1` + 1 new obsolescence markers obsoleted 1 changesets $ hg debugobsolete `getid A_1` `getid A_2` + 1 new obsolescence markers obsoleted 1 changesets $ hg debugobsolete `getid A_0` bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb + 1 new obsolescence markers $ hg debugobsolete bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb cccccccccccccccccccccccccccccccccccccccc + 1 new obsolescence markers $ hg debugobsolete `getid A_1` dddddddddddddddddddddddddddddddddddddddd + 1 new obsolescence markers $ hg debugsuccessorssets --hidden 'desc('A_0')' 007dc284c1f8 @@ -256,14 +269,18 @@ $ newcase converged_divergence $ hg debugobsolete `getid A_0` `getid A_1` + 1 new obsolescence markers obsoleted 1 changesets $ hg debugobsolete `getid A_0` `getid A_2` + 1 new obsolescence markers 2 new content-divergent changesets $ mkcommit A_3 created new head $ hg debugobsolete `getid A_1` `getid A_3` + 1 new obsolescence markers obsoleted 1 changesets $ hg debugobsolete `getid A_2` `getid A_3` + 1 new obsolescence markers obsoleted 1 changesets $ hg log -G --hidden @ 4:01f36c5a8fda A_3 @@ -312,6 +329,7 @@ $ newcase split $ hg debugobsolete `getid A_0` `getid A_1` `getid A_2` + 1 new obsolescence markers obsoleted 1 changesets $ hg log -G --hidden o 3:392fd25390da A_2 @@ -354,18 +372,21 @@ $ mkcommit A_3 created new head $ hg debugobsolete `getid A_1` `getid A_3` + 1 new obsolescence markers obsoleted 1 changesets $ hg up 0 0 files updated, 0 files merged, 1 files removed, 0 files unresolved $ mkcommit A_4 created new head $ hg debugobsolete `getid A_2` `getid A_4` + 1 new obsolescence markers obsoleted 1 changesets $ hg up 0 0 files updated, 0 files merged, 1 files removed, 0 files unresolved $ mkcommit A_5 created new head $ hg debugobsolete `getid A_4` `getid A_5` + 1 new obsolescence markers obsoleted 1 changesets $ hg log -G --hidden @ 6:e442cfc57690 A_5 @@ -426,6 +447,7 @@ $ mkcommit B_0; hg up 0 0 files updated, 0 files merged, 2 files removed, 0 files unresolved $ hg debugobsolete `getid B_0` `getid A_2` + 1 new obsolescence markers obsoleted 1 changesets $ mkcommit A_7; hg up 0 created new head @@ -434,11 +456,13 @@ created new head 0 files updated, 0 files merged, 1 files removed, 0 files unresolved $ 
hg debugobsolete `getid A_5` `getid A_7` `getid A_8` + 1 new obsolescence markers obsoleted 1 changesets $ mkcommit A_9; hg up 0 created new head 0 files updated, 0 files merged, 1 files removed, 0 files unresolved $ hg debugobsolete `getid A_5` `getid A_9` + 1 new obsolescence markers 4 new content-divergent changesets $ hg log -G --hidden * 10:bed64f5d2f5a A_9 @@ -547,10 +571,13 @@ created new head 0 files updated, 0 files merged, 1 files removed, 0 files unresolved $ hg debugobsolete `getid A_9` `getid A_A` + 1 new obsolescence markers obsoleted 1 changesets $ hg debugobsolete `getid A_7` `getid A_A` + 1 new obsolescence markers obsoleted 1 changesets $ hg debugobsolete `getid A_8` `getid A_A` + 1 new obsolescence markers obsoleted 1 changesets $ hg log -G --hidden o 11:a139f71be9da A_A @@ -675,8 +702,10 @@ $ newcase subset $ hg debugobsolete `getid A_0` `getid A_2` + 1 new obsolescence markers obsoleted 1 changesets $ hg debugobsolete `getid A_0` `getid A_1` `getid A_2` + 1 new obsolescence markers $ hg debugsuccessorssets --hidden 'desc('A_0')' 007dc284c1f8 82623d38b9ba 392fd25390da
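These cases all reduce to computing successors sets: a changeset becomes content-divergent when its obsolescence markers yield more than one set of living successors, as in the "direct" case at the top where A_0 is rewritten two competing ways. A toy model of that computation (no cycle protection or pruning subtleties; Mercurial's real algorithm lives in mercurial/obsutil.py):

    def successors_sets(node, markers):
        """markers maps a node to its obsolescence markers; each marker is a
        tuple of successor nodes (an empty tuple records a prune)."""
        if node not in markers:
            return [(node,)]  # not obsolete: the node is its own successor set
        sets = []
        for succs in markers[node]:   # competing markers cause divergence
            if not succs:
                sets.append(())       # pruned with no successor
                continue
            partial = [()]            # splits: combine each successor's sets
            for s in succs:
                partial = [p + q
                           for p in partial
                           for q in successors_sets(s, markers)]
            sets.extend(partial)
        # deduplicate while preserving order
        seen, unique = set(), []
        for s in sets:
            if s not in seen:
                seen.add(s)
                unique.append(s)
        return unique

    markers = {'A_0': [('A_1',), ('A_2',)]}  # two rewrites of one changeset
    sets = successors_sets('A_0', markers)
    print(sets)           # [('A_1',), ('A_2',)]
    print(len(sets) > 1)  # True: A_1 and A_2 are content-divergent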
--- a/tests/test-obsolete-tag-cache.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-obsolete-tag-cache.t Mon Sep 09 17:26:17 2019 -0400 @@ -59,6 +59,7 @@ Hiding a non-tip changeset should change filtered hash and cause tags recompute $ hg debugobsolete -d '0 0' c3cb30f2d2cd0aae008cc91a07876e3c5131fd22 -u dummyuser + 1 new obsolescence markers obsoleted 1 changesets $ hg tags @@ -81,8 +82,10 @@ Hiding another changeset should cause the filtered hash to change $ hg debugobsolete -d '0 0' d75775ffbc6bca1794d300f5571272879bd280da -u dummyuser + 1 new obsolescence markers obsoleted 1 changesets $ hg debugobsolete -d '0 0' 5f97d42da03fd56f3b228b03dfe48af5c0adf75b -u dummyuser + 1 new obsolescence markers obsoleted 1 changesets $ hg tags
--- a/tests/test-obsolete.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-obsolete.t Mon Sep 09 17:26:17 2019 -0400 @@ -52,6 +52,7 @@ abort: changeset references must be full hexadecimal node identifiers [255] $ hg debugobsolete -d '0 0' `getid kill_me` -u babar + 1 new obsolescence markers obsoleted 1 changesets $ hg debugobsolete 97b7c2d76b1845ed3eb988cd612611e72406cef0 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'babar'} @@ -88,6 +89,7 @@ created new head $ hg log -r 'hidden()' --template '{rev}:{node|short} {desc}\n' --hidden $ hg debugobsolete --config format.obsstore-version=0 --flag 12 `getid original_c` `getid new_c` -d '121 120' + 1 new obsolescence markers obsoleted 1 changesets $ hg log -r 'hidden()' --template '{rev}:{node|short} {desc}\n' --hidden 2:245bde4270cd add original_c @@ -112,6 +114,7 @@ $ mkcommit new_2_c created new head $ hg debugobsolete -d '1337 0' `getid new_c` `getid new_2_c` + 1 new obsolescence markers obsoleted 1 changesets $ hg debugobsolete 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'} @@ -124,8 +127,10 @@ $ mkcommit new_3_c created new head $ hg debugobsolete -d '1338 0' `getid new_2_c` 1337133713371337133713371337133713371337 + 1 new obsolescence markers obsoleted 1 changesets $ hg debugobsolete -d '1339 0' 1337133713371337133713371337133713371337 `getid new_3_c` + 1 new obsolescence markers $ hg debugobsolete 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'} cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'} @@ -264,6 +269,7 @@ $ hg ci -m 'add n3w_3_c' created new head $ hg debugobsolete -d '1338 0' --flags 1 `getid new_3_c` `getid n3w_3_c` + 1 new obsolescence markers obsoleted 1 changesets $ hg log -r 'phasedivergent()' $ hg log -G @@ -323,6 +329,7 @@ $ mkcommit kill0 $ hg up -q null $ hg debugobsolete `getid kill0` + 1 new obsolescence markers obsoleted 1 changesets $ mkcommit a $ mkcommit b @@ -349,6 +356,7 @@ $ mkcommit b $ hg up -q null $ hg --config experimental.evolution.report-instabilities=false debugobsolete `getid a` + 1 new obsolescence markers obsoleted 1 changesets $ cd .. 
@@ -392,6 +400,7 @@
 Rollback//Transaction support
   $ hg debugobsolete -d '1340 0' aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
+  1 new obsolescence markers
   $ hg debugobsolete
   1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
   245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
@@ -478,6 +487,7 @@
   $ hg init tmpe
   $ cd tmpe
   $ hg debugobsolete -d '1339 0' 1339133913391339133913391339133913391339 ca819180edb99ed25ceafb3e9584ac287e240b00
+  1 new obsolescence markers
   $ hg pull ../tmpb
   pulling from ../tmpb
   requesting all changes
@@ -531,6 +541,7 @@
   $ mkcommit original_d
   $ mkcommit original_e
   $ hg debugobsolete --record-parents `getid original_d` -d '0 0'
+  1 new obsolescence markers
   obsoleted 1 changesets
   1 new orphan changesets
   $ hg debugobsolete | grep `getid original_d`
@@ -627,6 +638,7 @@
   created new head
   $ hg debugobsolete `getid 'original_e'` `getid 'obsolete_e'` \
   > -u 'test <test@example.net>'
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg outgoing ../tmpf # parasite hg outgoing testin
   comparing with ../tmpf
@@ -812,65 +824,125 @@
   > do
   >    hg debugobsolete $node
   > done
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg up tip
   2 files updated, 0 files merged, 0 files removed, 0 files unresolved
@@ -938,9 +1010,11 @@
 Several troubles on the same changeset (create an unstable and bumped and content-divergent changeset)
   $ hg debugobsolete `getid obsolete_e`
+  1 new obsolescence markers
   obsoleted 1 changesets
   2 new orphan changesets
   $ hg debugobsolete `getid original_c` `getid babar`
+  1 new obsolescence markers
   1 new phase-divergent changesets
   2 new content-divergent changesets
   $ hg log --config ui.logtemplate= -r 'phasedivergent() and orphan() and contentdivergent()'
@@ -1309,6 +1383,7 @@
   grafting 1:1c9eddb02162 "content-1" (tip)
   $ hg debugobsolete `hg log -r1 -T'{node}'` `hg log -r2 -T'{node}'`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg serve -n test -p $HGPORT -d --pid-file=hg.pid -A access.log -E errors.log
@@ -1610,6 +1685,7 @@
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
   saved backup bundle to $TESTTMP/tmpe/issue4845/doindexrev/.hg/strip-backup/9bc153528424-ee80edd4-backup.hg
   $ hg debugobsolete 9bc153528424ea266d13e57f9ff0d799dfe61e4b
+  1 new obsolescence markers
   $ hg unbundle ../bundle-2.hg
   adding changesets
   adding manifests
--- a/tests/test-phases.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-phases.t   Mon Sep 09 17:26:17 2019 -0400
@@ -638,6 +638,7 @@
 (making a changeset hidden; H in that case)
   $ hg debugobsolete `hg id --debug -r 5`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ cd ..
--- a/tests/test-pull-bundle.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-pull-bundle.t   Mon Sep 09 17:26:17 2019 -0400
@@ -101,15 +101,13 @@
   adding changesets
   adding manifests
   adding file changes
-  added 1 changesets with 1 changes to 1 files
   adding changesets
   adding manifests
   adding file changes
-  added 1 changesets with 1 changes to 1 files
   adding changesets
   adding manifests
   adding file changes
-  added 1 changesets with 1 changes to 1 files (+1 heads)
+  added 3 changesets with 3 changes to 3 files (+1 heads)
   new changesets bbd179dfa0a7:ed1b79f46b9a (3 drafts)
   updating to branch default
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
--- a/tests/test-pull-update.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-pull-update.t   Mon Sep 09 17:26:17 2019 -0400
@@ -108,8 +108,8 @@
   adding changesets
   adding manifests
   adding file changes
+  adding remote bookmark active-after-pull
   added 1 changesets with 1 changes to 1 files
-  adding remote bookmark active-after-pull
   new changesets f815b3da6163
   1 local changesets published
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
@@ -138,8 +138,8 @@
   adding changesets
   adding manifests
   adding file changes
+  adding remote bookmark active-after-pull
   added 1 changesets with 1 changes to 1 files
-  adding remote bookmark active-after-pull
   new changesets f815b3da6163
   1 local changesets published
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
--- a/tests/test-push-checkheads-partial-C1.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-push-checkheads-partial-C1.t   Mon Sep 09 17:26:17 2019 -0400
@@ -60,6 +60,7 @@
   $ mkcommit B1
   created new head
   $ hg debugobsolete `getid "desc(B0)" ` `getid "desc(B1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   @  25c56d33e4c4 (draft): B1
--- a/tests/test-push-checkheads-partial-C2.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-push-checkheads-partial-C2.t   Mon Sep 09 17:26:17 2019 -0400
@@ -60,6 +60,7 @@
   $ mkcommit A1
   created new head
   $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   1 new orphan changesets
   $ hg log -G --hidden
--- a/tests/test-push-checkheads-partial-C3.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-push-checkheads-partial-C3.t   Mon Sep 09 17:26:17 2019 -0400
@@ -60,6 +60,7 @@
   $ mkcommit C0
   created new head
   $ hg debugobsolete --record-parents `getid "desc(B0)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   @  0f88766e02d6 (draft): C0
--- a/tests/test-push-checkheads-partial-C4.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-push-checkheads-partial-C4.t   Mon Sep 09 17:26:17 2019 -0400
@@ -60,6 +60,7 @@
   $ mkcommit C0
   created new head
   $ hg debugobsolete --record-parents `getid "desc(A0)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   1 new orphan changesets
   $ hg log -G --hidden
--- a/tests/test-push-checkheads-pruned-B1.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-push-checkheads-pruned-B1.t   Mon Sep 09 17:26:17 2019 -0400
@@ -49,6 +49,7 @@
   $ mkcommit B0
   created new head
   $ hg debugobsolete --record-parents `getid "desc(A0)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   @  74ff5441d343 (draft): B0
--- a/tests/test-push-checkheads-pruned-B2.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-push-checkheads-pruned-B2.t   Mon Sep 09 17:26:17 2019 -0400
@@ -60,9 +60,11 @@
   $ mkcommit A1
   created new head
   $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   1 new orphan changesets
   $ hg debugobsolete --record-parents `getid "desc(B0)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   @  f6082bc4ffef (draft): A1
--- a/tests/test-push-checkheads-pruned-B3.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-push-checkheads-pruned-B3.t   Mon Sep 09 17:26:17 2019 -0400
@@ -60,9 +60,11 @@
   $ mkcommit B1
   created new head
   $ hg debugobsolete --record-parents `getid "desc(A0)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   1 new orphan changesets
   $ hg debugobsolete `getid "desc(B0)" ` `getid "desc(B1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   @  25c56d33e4c4 (draft): B1
--- a/tests/test-push-checkheads-pruned-B4.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-push-checkheads-pruned-B4.t   Mon Sep 09 17:26:17 2019 -0400
@@ -61,9 +61,11 @@
   $ mkcommit C0
   created new head
   $ hg debugobsolete --record-parents `getid "desc(A0)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   1 new orphan changesets
   $ hg debugobsolete --record-parents `getid "desc(B0)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   @  0f88766e02d6 (draft): C0
--- a/tests/test-push-checkheads-pruned-B5.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-push-checkheads-pruned-B5.t   Mon Sep 09 17:26:17 2019 -0400
@@ -64,11 +64,14 @@
   $ mkcommit B1
   created new head
   $ hg debugobsolete --record-parents `getid "desc(A0)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   2 new orphan changesets
   $ hg debugobsolete `getid "desc(B0)"` `getid "desc(B1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg debugobsolete --record-parents `getid "desc(C0)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   @  25c56d33e4c4 (draft): B1
--- a/tests/test-push-checkheads-pruned-B6.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-push-checkheads-pruned-B6.t   Mon Sep 09 17:26:17 2019 -0400
@@ -52,8 +52,10 @@
   $ hg up 'desc(B0)'
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
   $ hg debugobsolete `getid "desc(A0)"` `getid "desc(A1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg debugobsolete --record-parents `getid "desc(A1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   x  ba93660aff8d (draft): A1
--- a/tests/test-push-checkheads-pruned-B7.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-push-checkheads-pruned-B7.t   Mon Sep 09 17:26:17 2019 -0400
@@ -51,8 +51,10 @@
   $ hg up 'desc(B0)'
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
   $ hg debugobsolete `getid "desc(A0)"` `getid "desc(A1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg debugobsolete --record-parents `getid "desc(A1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   x  ba93660aff8d (draft): A1
--- a/tests/test-push-checkheads-pruned-B8.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-push-checkheads-pruned-B8.t   Mon Sep 09 17:26:17 2019 -0400
@@ -67,13 +67,17 @@
   $ mkcommit A2
   created new head
   $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   1 new orphan changesets
   $ hg debugobsolete `getid "desc(B0)" ` `getid "desc(B1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg debugobsolete --record-parents `getid "desc(B1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg debugobsolete `getid "desc(A1)" ` `getid "desc(A2)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   @  c1f8d089020f (draft): A2
--- a/tests/test-push-checkheads-superceed-A1.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-push-checkheads-superceed-A1.t   Mon Sep 09 17:26:17 2019 -0400
@@ -46,6 +46,7 @@
   $ mkcommit A1
   created new head
   $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   @  f6082bc4ffef (draft): A1
--- a/tests/test-push-checkheads-superceed-A2.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-push-checkheads-superceed-A2.t   Mon Sep 09 17:26:17 2019 -0400
@@ -60,9 +60,11 @@
   created new head
   $ mkcommit B1
   $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   1 new orphan changesets
   $ hg debugobsolete `getid "desc(B0)" ` `getid "desc(B1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   @  262c8c798096 (draft): B1
--- a/tests/test-push-checkheads-superceed-A3.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-push-checkheads-superceed-A3.t   Mon Sep 09 17:26:17 2019 -0400
@@ -63,9 +63,11 @@
   created new head
   $ mkcommit A1
   $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   1 new orphan changesets
   $ hg debugobsolete `getid "desc(B0)" ` `getid "desc(B1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   @  c1c7524e9488 (draft): A1
--- a/tests/test-push-checkheads-superceed-A4.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-push-checkheads-superceed-A4.t   Mon Sep 09 17:26:17 2019 -0400
@@ -48,6 +48,7 @@
   $ mkcommit A1
   created new head
   $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ mkcommit B0
   $ hg log -G --hidden
--- a/tests/test-push-checkheads-superceed-A5.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-push-checkheads-superceed-A5.t   Mon Sep 09 17:26:17 2019 -0400
@@ -49,6 +49,7 @@
   created new head
   $ mkcommit A1
   $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   @  ba93660aff8d (draft): A1
--- a/tests/test-push-checkheads-superceed-A6.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-push-checkheads-superceed-A6.t   Mon Sep 09 17:26:17 2019 -0400
@@ -69,9 +69,11 @@
   1 files updated, 0 files merged, 1 files removed, 0 files unresolved
   $ mkcommit B1
   $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   1 new orphan changesets
   $ hg debugobsolete `getid "desc(B0)" ` `getid "desc(B1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   @  d70a1f75a020 (draft): B1
--- a/tests/test-push-checkheads-superceed-A7.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-push-checkheads-superceed-A7.t   Mon Sep 09 17:26:17 2019 -0400
@@ -69,9 +69,11 @@
   $ mkcommit B1
   created new head
   $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   1 new orphan changesets
   $ hg debugobsolete `getid "desc(B0)" ` `getid "desc(B1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   @  25c56d33e4c4 (draft): B1
--- a/tests/test-push-checkheads-superceed-A8.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-push-checkheads-superceed-A8.t   Mon Sep 09 17:26:17 2019 -0400
@@ -53,8 +53,10 @@
   $ mkcommit A2
   created new head
   $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg debugobsolete `getid "desc(A1)" ` `getid "desc(A2)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   @  c1f8d089020f (draft): A2
--- a/tests/test-push-checkheads-unpushed-D1.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-push-checkheads-unpushed-D1.t   Mon Sep 09 17:26:17 2019 -0400
@@ -49,6 +49,7 @@
   $ mkcommit A1
   created new head
   $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg up 0
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
--- a/tests/test-push-checkheads-unpushed-D2.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-push-checkheads-unpushed-D2.t   Mon Sep 09 17:26:17 2019 -0400
@@ -64,9 +64,11 @@
   $ mkcommit A1
   created new head
   $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   1 new orphan changesets
   $ hg debugobsolete --record-parents `getid "desc(B0)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg up 0
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
--- a/tests/test-push-checkheads-unpushed-D3.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-push-checkheads-unpushed-D3.t   Mon Sep 09 17:26:17 2019 -0400
@@ -67,9 +67,11 @@
   $ mkcommit B1
   created new head
   $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   1 new orphan changesets
   $ hg debugobsolete `getid "desc(B0)" ` `getid "desc(B1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   @  25c56d33e4c4 (draft): B1
--- a/tests/test-push-checkheads-unpushed-D4.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-push-checkheads-unpushed-D4.t   Mon Sep 09 17:26:17 2019 -0400
@@ -83,9 +83,11 @@
   1 files updated, 0 files merged, 1 files removed, 0 files unresolved
   $ mkcommit B1
   $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   1 new orphan changesets
   $ hg debugobsolete `getid "desc(B0)" ` `getid "desc(B1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   @  d70a1f75a020 (draft): B1
--- a/tests/test-push-checkheads-unpushed-D5.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-push-checkheads-unpushed-D5.t   Mon Sep 09 17:26:17 2019 -0400
@@ -72,9 +72,11 @@
   $ mkcommit B1
   created new head
   $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   1 new orphan changesets
   $ hg debugobsolete `getid "desc(B0)" ` `getid "desc(B1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   @  25c56d33e4c4 (draft): B1
--- a/tests/test-push-checkheads-unpushed-D6.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-push-checkheads-unpushed-D6.t   Mon Sep 09 17:26:17 2019 -0400
@@ -56,8 +56,10 @@
   $ mkcommit C0
   created new head
   $ hg debugobsolete `getid "desc(A0)"` `getid "desc(A1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg debugobsolete --record-parents `getid "desc(A1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   @  0f88766e02d6 (draft): C0
--- a/tests/test-push-checkheads-unpushed-D7.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-push-checkheads-unpushed-D7.t   Mon Sep 09 17:26:17 2019 -0400
@@ -65,10 +65,13 @@
   $ mkcommit C0
   created new head
   $ hg debugobsolete `getid "desc(A0)"` `getid "desc(A1)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg debugobsolete `getid "desc(A1)"` `getid "desc(A2)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg debugobsolete --record-parents `getid "desc(A2)"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G --hidden
   @  0f88766e02d6 (draft): C0
--- a/tests/test-push-http.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-push-http.t   Mon Sep 09 17:26:17 2019 -0400
@@ -88,8 +88,8 @@
   remote: adding manifests
   remote: adding file changes
   remote: adding a revisions
+  remote: updating the branch cache
   remote: added 1 changesets with 1 changes to 1 files
-  remote: updating the branch cache
   remote: running hook txnclose-phase.test: sh $TESTTMP/hook.sh
   remote: phase-move: cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b: draft -> public
   remote: running hook txnclose-phase.test: sh $TESTTMP/hook.sh
@@ -117,8 +117,8 @@
   remote: adding manifests
   remote: adding file changes
   remote: adding a revisions
+  remote: updating the branch cache
   remote: added 1 changesets with 1 changes to 1 files
-  remote: updating the branch cache
   remote: running hook txnclose-phase.test: sh $TESTTMP/hook.sh
   remote: phase-move: cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b: draft -> public
   remote: running hook txnclose-phase.test: sh $TESTTMP/hook.sh
@@ -309,7 +309,6 @@
   remote: adding changesets
   remote: adding manifests
   remote: adding file changes
-  remote: added 1 changesets with 1 changes to 1 files
   remote: prepushkey hook: HG_BUNDLE2=1
   remote:   HG_HOOKNAME=prepushkey
   remote:   HG_HOOKTYPE=prepushkey
@@ -351,7 +350,6 @@
   remote: adding changesets
   remote: adding manifests
   remote: adding file changes
-  remote: added 1 changesets with 1 changes to 1 files
   remote: prepushkey hook: HG_BUNDLE2=1
   remote:   HG_HOOKNAME=prepushkey
   remote:   HG_HOOKTYPE=prepushkey
@@ -368,6 +366,7 @@
   remote:   HG_TXNNAME=serve
   remote:   HG_URL=remote:http:$LOCALIP: (glob)
   remote:
+  remote: added 1 changesets with 1 changes to 1 files
   % serve errors
 #endif
@@ -410,7 +409,6 @@
   remote: adding changesets
   remote: adding manifests
   remote: adding file changes
-  remote: added 1 changesets with 1 changes to 1 files
   remote: prepushkey hook: HG_BUNDLE2=1
   remote:   HG_HOOKNAME=prepushkey
   remote:   HG_HOOKTYPE=prepushkey
@@ -465,7 +463,6 @@
   remote: adding changesets
   remote: adding manifests
   remote: adding file changes
-  remote: added 1 changesets with 1 changes to 1 files
   remote: prepushkey hook: HG_BUNDLE2=1
   remote:   HG_HOOKNAME=prepushkey
   remote:   HG_HOOKTYPE=prepushkey
@@ -482,6 +479,7 @@
   remote:   HG_TXNNAME=serve
   remote:   HG_URL=remote:http:$LOCALIP: (glob)
   remote:
+  remote: added 1 changesets with 1 changes to 1 files
   % serve errors
 #endif
--- a/tests/test-push-race.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-push-race.t   Mon Sep 09 17:26:17 2019 -0400
@@ -1608,6 +1608,7 @@
   $ ID_Q=`hg -R client-racy log -T '{node}\n' -r 'desc("C-Q")'`
   $ ID_V=`hg -R client-racy log -T '{node}\n' -r 'desc("C-V")'`
   $ hg -R client-racy debugobsolete $ID_Q $ID_V
+  1 new obsolescence markers
   obsoleted 1 changesets
 Pushing
@@ -1800,6 +1801,7 @@
   $ ID_V=`hg -R client-other log -T '{node}\n' -r 'desc("C-V")'`
   $ ID_W=`hg -R client-other log -T '{node}\n' -r 'desc("C-W")'`
   $ hg -R client-other debugobsolete $ID_V $ID_W
+  1 new obsolescence markers
   obsoleted 1 changesets
 (continue the same head)
--- a/tests/test-push.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-push.t   Mon Sep 09 17:26:17 2019 -0400
@@ -287,9 +287,9 @@
   adding changesets
   adding manifests
   adding file changes
-  added 1 changesets with 1 changes to 1 files
   lock:  user *, process * (*s) (glob)
   wlock: free
+  added 1 changesets with 1 changes to 1 files
   $ hg --cwd 1 --config extensions.strip= strip tip -q
   $ hg --cwd 2 --config extensions.strip= strip tip -q
@@ -299,9 +299,9 @@
   adding changesets
   adding manifests
   adding file changes
-  added 1 changesets with 1 changes to 1 files
   lock:  user *, process * (*s) (glob)
   wlock: user *, process * (*s) (glob)
+  added 1 changesets with 1 changes to 1 files

 Test bare push with multiple race checking options
 --------------------------------------------------
--- a/tests/test-pushvars.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-pushvars.t   Mon Sep 09 17:26:17 2019 -0400
@@ -41,9 +41,9 @@
   adding changesets
   adding manifests
   adding file changes
-  added 1 changesets with 1 changes to 1 files
   HG_USERVAR_BYPASS_REVIEW=true
   HG_USERVAR_DEBUG=1
+  added 1 changesets with 1 changes to 1 files

 Test pushing var with empty right-hand side
@@ -55,8 +55,8 @@
   adding changesets
   adding manifests
   adding file changes
+  HG_USERVAR_DEBUG=
   added 1 changesets with 1 changes to 1 files
-  HG_USERVAR_DEBUG=

 Test pushing bad vars
--- a/tests/test-rebase-conflicts.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-rebase-conflicts.t   Mon Sep 09 17:26:17 2019 -0400
@@ -315,7 +315,6 @@
   adding manifests
   adding file changes
   adding f1.txt revisions
-  added 2 changesets with 2 changes to 1 files
   bundle2-input-part: total payload size 1686
   bundle2-input-part: "cache:rev-branch-cache" (advisory) supported
   bundle2-input-part: total payload size 74
@@ -323,6 +322,7 @@
   bundle2-input-part: "phase-heads" supported
   bundle2-input-part: total payload size 24
   bundle2-input-bundle: 2 parts total
+  added 2 changesets with 2 changes to 1 files
   updating the branch cache
   invalid branch cache (served): tip differs
   invalid branch cache (served.hidden): tip differs
--- a/tests/test-rebase-inmemory.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-rebase-inmemory.t   Mon Sep 09 17:26:17 2019 -0400
@@ -506,6 +506,7 @@
   $ hg rebase -s 2 -d 7
   rebasing 2:177f92b77385 "c"
   abort: outstanding merge conflicts
+  (use 'hg resolve' to resolve)
   [255]
   $ hg resolve -l
   U e
--- a/tests/test-rebase-obsolete.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-rebase-obsolete.t   Mon Sep 09 17:26:17 2019 -0400
@@ -651,6 +651,7 @@
   $ hg commit -m J
   1 new orphan changesets
   $ hg debugobsolete `hg log --rev . -T '{node}'`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg rebase --rev .~1::. --dest 'max(desc(D))' --traceback --config experimental.rebaseskipobsolete=off
@@ -838,6 +839,7 @@
   o  0:4a2df7238c3b A
   $ hg debugobsolete `hg log -r 7 -T '{node}\n'` --config experimental.evolution=true
+  1 new obsolescence markers
   obsoleted 1 changesets
   1 new orphan changesets
   $ hg rebase -d 6 -r "4::"
@@ -867,6 +869,7 @@
   $ hg commit -m nonrelevant
   created new head
   $ hg debugobsolete `hg log -r 11 -T '{node}\n'` --config experimental.evolution=true
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G
   @  11:f44da1f4954c nonrelevant (pruned)
@@ -1007,6 +1010,7 @@
   $ hg add L
   $ hg commit -m "dummy change"
   $ hg debugobsolete `hg log -r ".^" -T '{node}'` `hg log -r 18 -T '{node}'` --config experimental.evolution=true
+  1 new obsolescence markers
   obsoleted 1 changesets
   1 new orphan changesets
@@ -1276,6 +1280,7 @@
   > EOF
   1 new orphan changesets
   $ hg debugobsolete `hg log -T "{node}" --hidden -r 'desc("c1")'`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg log -G -r 'a': --hidden
   *  4:76be324c128b d
--- a/tests/test-remote-hidden.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-remote-hidden.t   Mon Sep 09 17:26:17 2019 -0400
@@ -36,6 +36,7 @@
   $ hg ci -m "c_Pruned"
   created new head
   $ hg debugobsolete --record-parents `getid 'desc("c_Pruned")'` -d '0 0'
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg up ".^"
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
@@ -45,6 +46,7 @@
   $ echo 5 > a
   $ hg ci -m "c_Secret_Pruned" --secret
   $ hg debugobsolete --record-parents `getid 'desc("c_Secret_Pruned")'` -d '0 0'
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg up null
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
--- a/tests/test-remotefilelog-bgprefetch.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-remotefilelog-bgprefetch.t   Mon Sep 09 17:26:17 2019 -0400
@@ -1,6 +1,12 @@
 #require no-windows
   $ . "$TESTDIR/remotefilelog-library.sh"
+# devel.remotefilelog.ensurestart: reduce race condition with
+# waiton{repack/prefetch}
+  $ cat >> $HGRCPATH <<EOF
+  > [devel]
+  > remotefilelog.ensurestart=True
+  > EOF
   $ hg init master
   $ cd master
@@ -67,8 +73,8 @@
   adding changesets
   adding manifests
   adding file changes
+  updating bookmark foo
   added 1 changesets with 0 changes to 0 files
-  updating bookmark foo
   new changesets 6b4b6f66ef8c
   (run 'hg update' to get a working copy)
   prefetching file contents
@@ -96,8 +102,8 @@
   adding changesets
   adding manifests
   adding file changes
+  updating bookmark foo
   added 1 changesets with 0 changes to 0 files
-  updating bookmark foo
   new changesets 6b4b6f66ef8c
   (run 'hg update' to get a working copy)
   prefetching file contents
--- a/tests/test-remotefilelog-prefetch.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-remotefilelog-prefetch.t   Mon Sep 09 17:26:17 2019 -0400
@@ -94,8 +94,8 @@
   adding changesets
   adding manifests
   adding file changes
+  updating bookmark foo
   added 1 changesets with 0 changes to 0 files
-  updating bookmark foo
   new changesets 109c3a557a73
   (run 'hg update' to get a working copy)
   prefetching file contents
@@ -118,8 +118,8 @@
   adding changesets
   adding manifests
   adding file changes
+  updating bookmark foo
   added 1 changesets with 0 changes to 0 files
-  updating bookmark foo
   new changesets 109c3a557a73
   (run 'hg update' to get a working copy)
   prefetching file contents
@@ -149,8 +149,8 @@
   adding changesets
   adding manifests
   adding file changes
+  updating bookmark foo
   added 1 changesets with 0 changes to 0 files
-  updating bookmark foo
   new changesets 109c3a557a73
   1 local changesets published (?)
   (run 'hg update' to get a working copy)
--- a/tests/test-remotefilelog-repack-fast.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-remotefilelog-repack-fast.t   Mon Sep 09 17:26:17 2019 -0400
@@ -1,10 +1,13 @@
 #require no-windows
   $ . "$TESTDIR/remotefilelog-library.sh"
-
+# devel.remotefilelog.ensurestart: reduce race condition with
+# waiton{repack/prefetch}
   $ cat >> $HGRCPATH <<EOF
   > [remotefilelog]
   > fastdatapack=True
+  > [devel]
+  > remotefilelog.ensurestart=True
   > EOF
   $ hg init master
--- a/tests/test-remotefilelog-repack.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-remotefilelog-repack.t   Mon Sep 09 17:26:17 2019 -0400
@@ -1,6 +1,12 @@
 #require no-windows
   $ . "$TESTDIR/remotefilelog-library.sh"
+# devel.remotefilelog.ensurestart: reduce race condition with
+# waiton{repack/prefetch}
+  $ cat >> $HGRCPATH <<EOF
+  > [devel]
+  > remotefilelog.ensurestart=True
+  > EOF
   $ hg init master
   $ cd master
--- a/tests/test-remotefilelog-sparse.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-remotefilelog-sparse.t   Mon Sep 09 17:26:17 2019 -0400
@@ -58,8 +58,8 @@
   adding changesets
   adding manifests
   adding file changes
+  updating bookmark foo
   added 1 changesets with 0 changes to 0 files
-  updating bookmark foo
   new changesets 876b1317060d
   (run 'hg update' to get a working copy)
   prefetching file contents
--- a/tests/test-resolve.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-resolve.t   Mon Sep 09 17:26:17 2019 -0400
@@ -210,12 +210,15 @@
   [1]
   $ hg up 0
   abort: outstanding merge conflicts
+  (use 'hg resolve' to resolve)
   [255]
   $ hg merge 2
   abort: outstanding merge conflicts
+  (use 'hg resolve' to resolve)
   [255]
   $ hg merge --force 2
   abort: outstanding merge conflicts
+  (use 'hg resolve' to resolve)
   [255]

 set up conflict-free merge
--- a/tests/test-revlog-raw.py   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-revlog-raw.py   Mon Sep 09 17:26:17 2019 -0400
@@ -16,6 +16,7 @@
 from mercurial.revlogutils import (
     deltas,
+    flagutil,
 )

 # TESTTMP is optional. This makes it convenient to run without run-tests.py
@@ -56,7 +57,7 @@
     # can be used to verify hash.
     return False

-revlog.addflagprocessor(revlog.REVIDX_EXTSTORED,
+flagutil.addflagprocessor(revlog.REVIDX_EXTSTORED,
                         (readprocessor, writeprocessor, rawprocessor))

 # Utilities about reading and appending revlog
@@ -161,7 +162,7 @@
         p1 = rlog.node(r - 1)
         p2 = node.nullid
         if r == 0 or (rlog.flags(r) & revlog.REVIDX_EXTSTORED):
-            text = rlog.revision(r, raw=True)
+            text = rlog.rawdata(r)
             cachedelta = None
         else:
             # deltaparent cannot have EXTSTORED flag.
@@ -268,7 +269,7 @@
         abort('rev %d: wrong rawsize' % rev)
     if rlog.revision(rev, raw=False) != text:
         abort('rev %d: wrong text' % rev)
-    if rlog.revision(rev, raw=True) != rawtext:
+    if rlog.rawdata(rev) != rawtext:
         abort('rev %d: wrong rawtext' % rev)
     result.append((text, rawtext))
@@ -293,7 +294,10 @@
     nlog = newrevlog()
     for rev in revorder:
         for raw in raworder:
-            t = nlog.revision(rev, raw=raw)
+            if raw:
+                t = nlog.rawdata(rev)
+            else:
+                t = nlog.revision(rev)
             if t != expected[rev][int(raw)]:
                 abort('rev %d: corrupted %stext'
                       % (rev, raw and 'raw' or ''))
--- a/tests/test-revset.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-revset.t   Mon Sep 09 17:26:17 2019 -0400
@@ -2003,6 +2003,7 @@
   4:ffff85cff0ff78504fcdc3c0bc10de0c65379249
   ffff8
   2147483647:ffffffffffffffffffffffffffffffffffffffff
   fffff
   $ hg debugobsolete fffbae3886c8fbb2114296380d276fd37715d571
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg debugrevspec 'fff'
--- a/tests/test-rust-discovery.py   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-rust-discovery.py   Mon Sep 09 17:26:17 2019 -0400
@@ -1,16 +1,9 @@
 from __future__ import absolute_import
 import unittest

-try:
-    from mercurial import rustext
-    rustext.__name__  # trigger immediate actual import
-except ImportError:
-    rustext = None
-else:
-    # this would fail already without appropriate ancestor.__package__
-    from mercurial.rustext.discovery import (
-        PartialDiscovery,
-    )
+from mercurial import policy
+
+PartialDiscovery = policy.importrust('discovery', member='PartialDiscovery')

 try:
     from mercurial.cext import parsers as cparsers
@@ -38,8 +31,16 @@
     b'\x00\x00\x00\x00\x00\x00\x00\x00\x00'
     )

+class fakechangelog(object):
+    def __init__(self, idx):
+        self.index = idx

-@unittest.skipIf(rustext is None or cparsers is None,
+class fakerepo(object):
+    def __init__(self, idx):
+        """Just make so that self.changelog.index is the given idx."""
+        self.changelog = fakechangelog(idx)
+
+@unittest.skipIf(PartialDiscovery is None or cparsers is None,
                  "rustext or the C Extension parsers module "
                  "discovery relies on is not available")
 class rustdiscoverytest(unittest.TestCase):
@@ -57,6 +58,9 @@
     def parseindex(self):
         return cparsers.parse_index2(data_non_inlined, False)[0]

+    def repo(self):
+        return fakerepo(self.parseindex())
+
     def testindex(self):
         idx = self.parseindex()
         # checking our assumptions about the index binary data:
@@ -67,8 +71,7 @@
                          3: (2, -1)})

     def testaddcommonsmissings(self):
-        idx = self.parseindex()
-        disco = PartialDiscovery(idx, [3])
+        disco = PartialDiscovery(self.repo(), [3], True)
         self.assertFalse(disco.hasinfo())
         self.assertFalse(disco.iscomplete())
@@ -83,29 +86,29 @@
         self.assertEqual(disco.commonheads(), {1})

     def testaddmissingsstats(self):
-        idx = self.parseindex()
-        disco = PartialDiscovery(idx, [3])
+        disco = PartialDiscovery(self.repo(), [3], True)
         self.assertIsNone(disco.stats()['undecided'], None)

         disco.addmissings([2])
         self.assertEqual(disco.stats()['undecided'], 2)

     def testaddinfocommonfirst(self):
-        idx = self.parseindex()
-        disco = PartialDiscovery(idx, [3])
+        disco = PartialDiscovery(self.repo(), [3], True)
         disco.addinfo([(1, True), (2, False)])
         self.assertTrue(disco.hasinfo())
         self.assertTrue(disco.iscomplete())
         self.assertEqual(disco.commonheads(), {1})

     def testaddinfomissingfirst(self):
-        idx = self.parseindex()
-        disco = PartialDiscovery(idx, [3])
+        disco = PartialDiscovery(self.repo(), [3], True)
         disco.addinfo([(2, False), (1, True)])
         self.assertTrue(disco.hasinfo())
         self.assertTrue(disco.iscomplete())
         self.assertEqual(disco.commonheads(), {1})

+    def testinitnorandom(self):
+        PartialDiscovery(self.repo(), [3], True, randomize=False)
+
 if __name__ == '__main__':
     import silenttestrunner
     silenttestrunner.main(__name__)
--- a/tests/test-server-view.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-server-view.t   Mon Sep 09 17:26:17 2019 -0400
@@ -50,7 +50,12 @@
   $ hg -R test --config experimental.extra-filter-revs='not public()' debugupdatecache
   $ ls -1 test/.hg/cache/
   branch2-base%89c45d2fa07e
+  branch2-immutable%89c45d2fa07e
   branch2-served
+  branch2-served%89c45d2fa07e
+  branch2-served.hidden%89c45d2fa07e
+  branch2-visible%89c45d2fa07e
+  branch2-visible-hidden%89c45d2fa07e
   hgtagsfnodes1
   rbc-names-v1
   rbc-revs-v1
--- a/tests/test-setdiscovery.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-setdiscovery.t   Mon Sep 09 17:26:17 2019 -0400
@@ -64,7 +64,7 @@
   comparing with b
   query 1; heads
   searching for changes
-  all local heads known remotely
+  all local changesets known remotely
   elapsed time: * seconds (glob)
   heads summary:
     total common heads: 2
@@ -86,7 +86,7 @@
   comparing with b
   query 1; heads
   searching for changes
-  all local heads known remotely
+  all local changesets known remotely
   elapsed time: * seconds (glob)
   heads summary:
     total common heads: 1
@@ -968,7 +968,7 @@
   updating to branch b
   0 files updated, 0 files merged, 0 files removed, 0 files unresolved

-  $ hg -R a debugdiscovery b --debug --verbose --config progress.debug=true
+  $ hg -R a debugdiscovery b --debug --verbose --config progress.debug=true --config devel.discovery.randomize=false
   comparing with b
   query 1; heads
   searching for changes
@@ -980,13 +980,14 @@
   query 3; still undecided: 980, sample size is: 200
   sampling from both directions
   searching: 4 queries
-  query 4; still undecided: 435, sample size is: 210 (no-py3 !)
-  query 4; still undecided: 430, sample size is: 210 (py3 !)
+  query 4; still undecided: 497, sample size is: 210
   sampling from both directions
   searching: 5 queries
-  query 5; still undecided: 185, sample size is: 185 (no-py3 !)
-  query 5; still undecided: 187, sample size is: 187 (py3 !)
-  5 total queries in *.????s (glob)
+  query 5; still undecided: 285, sample size is: 220
+  sampling from both directions
+  searching: 6 queries
+  query 6; still undecided: 63, sample size is: 63
+  6 total queries in *.????s (glob)
   elapsed time: * seconds (glob)
   heads summary:
     total common heads: 1
@@ -1095,16 +1096,9 @@
 give 'all remote heads known locally' without checking the remaining heads -
 fixed in 86c35b7ae300:

-  $ cat >> $TESTTMP/unrandomsample.py << EOF
-  > import random
-  > def sample(population, k):
-  >     return sorted(population)[:k]
-  > random.sample = sample
-  > EOF
-
   $ cat >> r1/.hg/hgrc << EOF
-  > [extensions]
-  > unrandomsample = $TESTTMP/unrandomsample.py
+  > [devel]
+  > discovery.randomize = False
   > EOF

   $ hg -R r1 outgoing r2 -T'{rev} ' --config extensions.blackbox= \
--- a/tests/test-shelve.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-shelve.t   Mon Sep 09 17:26:17 2019 -0400
@@ -1239,6 +1239,7 @@
   > y
   > EOF
   unshelving change 'default'
+  temporarily committing pending changes (restore with 'hg unshelve --abort')
   rebasing shelved changes
   diff --git a/d b/d
   new file mode 100644
@@ -1250,6 +1251,10 @@
   record this change to 'd'?
   (enter ? for help) [Ynesfdaq?] y

+  $ hg status -v
+  A c
+  A d
   $ ls
   b
   c
@@ -1267,15 +1272,21 @@
   > B
   > C
   > EOF
-  $ hg shelve
+  $ echo > garbage
+  $ hg st
+  M foo
+  ? garbage
+  $ hg shelve --unknown
   shelved as default
-  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
   $ cat foo
   B
   $ hg unshelve -i <<EOF
   > y
   > y
   > n
+  > y
+  > y
   > EOF
   unshelving change 'default'
   rebasing shelved changes
@@ -1287,15 +1298,28 @@
   @@ -1,1 +1,2 @@
   +A
    B
-  record change 1/2 to 'foo'?
+  record change 1/3 to 'foo'?
   (enter ? for help) [Ynesfdaq?] y

   @@ -1,1 +2,2 @@
    B
   +C
-  record change 2/2 to 'foo'?
+  record change 2/3 to 'foo'?
   (enter ? for help) [Ynesfdaq?] n

+  diff --git a/garbage b/garbage
+  new file mode 100644
+  examine changes to 'garbage'?
+  (enter ? for help) [Ynesfdaq?] y
+
+  @@ -0,0 +1,1 @@
+  +
+  record change 3/3 to 'garbage'?
+  (enter ? for help) [Ynesfdaq?] y
+
+  $ hg st
+  M foo
+  ? garbage
   $ cat foo
   A
   B
@@ -1347,17 +1371,44 @@
   $ hg resolve -m bar1 bar2
   (no more unresolved files)
   continue: hg unshelve --continue
+
+-- using --continue with --interactive should throw an error
+  $ hg unshelve --continue -i
+  abort: cannot use both continue and interactive
+  [255]
+
   $ cat bar1
   A
   B
   C
-  $ hg unshelve --continue -i <<EOF
+
+#if stripbased
+  $ hg log -r 3:: -G
+  @  changeset:   5:f1d5f53e397b
+  |  tag:         tip
+  |  parent:      3:e28fd7fa7938
+  |  user:        shelve@localhost
+  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  summary:     changes to: add A to bars
+  |
+  | @  changeset:   4:fe451a778c81
+  |/   user:        test
+  |    date:        Thu Jan 01 00:00:00 1970 +0000
+  |    summary:     add C to bars
+  |
+  o  changeset:   3:e28fd7fa7938
+  |  user:        test
+  ~  date:        Thu Jan 01 00:00:00 1970 +0000
+     summary:     add A to bars
+
+#endif
+
+  $ hg unshelve --continue <<EOF
   > y
   > y
   > y
-  > y
+  > n
   > EOF
-  unshelving change 'default-01'
   diff --git a/bar1 b/bar1
   1 hunks, 1 lines changed
   examine changes to 'bar1'?
@@ -1380,6 +1431,51 @@
   +B
    C
   record change 2/2 to 'bar2'?
+  (enter ? for help) [Ynesfdaq?] n
+
+  unshelve of 'default-01' complete
+
+#if stripbased
+  $ hg log -r 3:: -G
+  @  changeset:   4:fe451a778c81
+  |  tag:         tip
+  |  user:        test
+  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  summary:     add C to bars
+  |
+  o  changeset:   3:e28fd7fa7938
+  |  user:        test
+  ~  date:        Thu Jan 01 00:00:00 1970 +0000
+     summary:     add A to bars
+
+#endif
+
+  $ hg unshelve --continue
+  abort: no unshelve in progress
+  [255]
+
+  $ hg shelve --list
+  default-01      (*)* changes to: add A to bars (glob)
+  default         (*)* changes to: add B to foo (glob)
+  $ hg unshelve -n default-01 -i <<EOF
+  > y
+  > y
+  > EOF
+  temporarily committing pending changes (restore with 'hg unshelve --abort')
+  rebasing shelved changes
+  diff --git a/bar2 b/bar2
+  1 hunks, 1 lines changed
+  examine changes to 'bar2'?
   (enter ? for help) [Ynesfdaq?] y

-  unshelve of 'default-01' complete
+  @@ -1,2 +1,3 @@
+   A
+  +B
+   C
+  record this change to 'bar2'?
+  (enter ? for help) [Ynesfdaq?] y
+
+-- test for --interactive --keep
+  $ hg unshelve -i --keep
+  abort: --keep on --interactive is not yet supported
+  [255]
--- a/tests/test-shelve2.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-shelve2.t   Mon Sep 09 17:26:17 2019 -0400
@@ -112,6 +112,7 @@
   shelved as default
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
   $ hg debugobsolete `hg log -r 0e067c57feba -T '{node}'`
+  1 new obsolescence markers
   obsoleted 1 changesets
   $ hg unshelve
   unshelving change 'default'
--- a/tests/test-show-work.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-show-work.t   Mon Sep 09 17:26:17 2019 -0400
@@ -235,6 +235,7 @@
   > evolution=createmarkers
   > EOF
   $ hg debugobsolete `hg log -r 'desc("commit 2")' -T "{node}"`
+  1 new obsolescence markers
   obsoleted 1 changesets
   1 new orphan changesets
   $ hg show work --color=debug
--- a/tests/test-single-head.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-single-head.t   Mon Sep 09 17:26:17 2019 -0400
@@ -71,7 +71,6 @@
   adding changesets
   adding manifests
   adding file changes
-  added 1 changesets with 1 changes to 1 files (+1 heads)
   transaction abort!
   rollback completed
   abort: rejecting multiple heads on branch "default"
--- a/tests/test-split.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-split.t   Mon Sep 09 17:26:17 2019 -0400
@@ -789,3 +789,190 @@
   abort: cannot split an empty revision
   [255]
 #endif
+
+Test that splitting moves works properly (issue5723)
+----------------------------------------------------
+
+  $ hg init $TESTTMP/issue5723-mv
+  $ cd $TESTTMP/issue5723-mv
+  $ printf '1\n2\n' > file
+  $ hg ci -qAm initial
+  $ hg mv file file2
+  $ printf 'a\nb\n1\n2\n3\n4\n' > file2
+  $ cat > $TESTTMP/messages <<EOF
+  > split1, keeping only the numbered lines
+  > --
+  > split2, keeping the lettered lines
+  > EOF
+  $ hg ci -m 'move and modify'
+  $ printf 'y\nn\na\na\n' | hg split
+  diff --git a/file b/file2
+  rename from file
+  rename to file2
+  2 hunks, 4 lines changed
+  examine changes to 'file' and 'file2'?
+  (enter ? for help) [Ynesfdaq?] y
+
+  @@ -0,0 +1,2 @@
+  +a
+  +b
+  record change 1/2 to 'file2'?
+  (enter ? for help) [Ynesfdaq?] n
+
+  @@ -2,0 +5,2 @@ 2
+  +3
+  +4
+  record change 2/2 to 'file2'?
+  (enter ? for help) [Ynesfdaq?] a
+
+  EDITOR: HG: Splitting 8c42fa635116. Write commit message for the first split changeset.
+  EDITOR: move and modify
+  EDITOR:
+  EDITOR:
+  EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
+  EDITOR: HG: Leave message empty to abort commit.
+  EDITOR: HG: --
+  EDITOR: HG: user: test
+  EDITOR: HG: branch 'default'
+  EDITOR: HG: added file2
+  EDITOR: HG: removed file
+  created new head
+  diff --git a/file2 b/file2
+  1 hunks, 2 lines changed
+  examine changes to 'file2'?
+  (enter ? for help) [Ynesfdaq?] a
+
+  EDITOR: HG: Splitting 8c42fa635116. So far it has been split into:
+  EDITOR: HG: - 478be2a70c27: split1, keeping only the numbered lines
+  EDITOR: HG: Write commit message for the next split changeset.
+  EDITOR: move and modify
+  EDITOR:
+  EDITOR:
+  EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
+  EDITOR: HG: Leave message empty to abort commit.
+  EDITOR: HG: --
+  EDITOR: HG: user: test
+  EDITOR: HG: branch 'default'
+  EDITOR: HG: changed file2
+  saved backup bundle to $TESTTMP/issue5723-mv/.hg/strip-backup/8c42fa635116-a38044d4-split.hg (obsstore-off !)
+  $ hg log -T '{desc}: {files%"{file} "}\n'
+  split2, keeping the lettered lines: file2
+  split1, keeping only the numbered lines: file file2
+  initial: file
+  $ cat file2
+  a
+  b
+  1
+  2
+  3
+  4
+  $ hg cat -r ".^" file2
+  1
+  2
+  3
+  4
+  $ hg cat -r . file2
+  a
+  b
+  1
+  2
+  3
+  4
+
+
+Test that splitting copies works properly (issue5723)
+----------------------------------------------------
+
+  $ hg init $TESTTMP/issue5723-cp
+  $ cd $TESTTMP/issue5723-cp
+  $ printf '1\n2\n' > file
+  $ hg ci -qAm initial
+  $ hg cp file file2
+  $ printf 'a\nb\n1\n2\n3\n4\n' > file2
+Also modify 'file' to prove that the changes aren't being pulled in
+accidentally.
+  $ printf 'this is the new contents of "file"' > file
+  $ cat > $TESTTMP/messages <<EOF
+  > split1, keeping "file" and only the numbered lines in file2
+  > --
+  > split2, keeping the lettered lines in file2
+  > EOF
+  $ hg ci -m 'copy file->file2, modify both'
+  $ printf 'f\ny\nn\na\na\n' | hg split
+  diff --git a/file b/file
+  1 hunks, 2 lines changed
+  examine changes to 'file'?
+  (enter ? for help) [Ynesfdaq?] f
+
+  diff --git a/file b/file2
+  copy from file
+  copy to file2
+  2 hunks, 4 lines changed
+  examine changes to 'file' and 'file2'?
+  (enter ? for help) [Ynesfdaq?] y
+
+  @@ -0,0 +1,2 @@
+  +a
+  +b
+  record change 2/3 to 'file2'?
+  (enter ? for help) [Ynesfdaq?] n
+
+  @@ -2,0 +5,2 @@ 2
+  +3
+  +4
+  record change 3/3 to 'file2'?
+  (enter ? for help) [Ynesfdaq?] a
+
+  EDITOR: HG: Splitting 41c861dfa61e. Write commit message for the first split changeset.
+  EDITOR: copy file->file2, modify both
+  EDITOR:
+  EDITOR:
+  EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
+  EDITOR: HG: Leave message empty to abort commit.
+  EDITOR: HG: --
+  EDITOR: HG: user: test
+  EDITOR: HG: branch 'default'
+  EDITOR: HG: added file2
+  EDITOR: HG: changed file
+  created new head
+  diff --git a/file2 b/file2
+  1 hunks, 2 lines changed
+  examine changes to 'file2'?
+  (enter ? for help) [Ynesfdaq?] a
+
+  EDITOR: HG: Splitting 41c861dfa61e. So far it has been split into:
+  EDITOR: HG: - 4b19e06610eb: split1, keeping "file" and only the numbered lines in file2
+  EDITOR: HG: Write commit message for the next split changeset.
+  EDITOR: copy file->file2, modify both
+  EDITOR:
+  EDITOR:
+  EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
+  EDITOR: HG: Leave message empty to abort commit.
+  EDITOR: HG: --
+  EDITOR: HG: user: test
+  EDITOR: HG: branch 'default'
+  EDITOR: HG: changed file2
+  saved backup bundle to $TESTTMP/issue5723-cp/.hg/strip-backup/41c861dfa61e-467e8d3c-split.hg (obsstore-off !)
+  $ hg log -T '{desc}: {files%"{file} "}\n'
+  split2, keeping the lettered lines in file2: file2
+  split1, keeping "file" and only the numbered lines in file2: file file2
+  initial: file
+  $ cat file2
+  a
+  b
+  1
+  2
+  3
+  4
+  $ hg cat -r ".^" file2
+  1
+  2
+  3
+  4
+  $ hg cat -r . file2
+  a
+  b
+  1
+  2
+  3
+  4
--- a/tests/test-ssh-bundle1.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-ssh-bundle1.t   Mon Sep 09 17:26:17 2019 -0400
@@ -583,7 +583,6 @@
   remote: adding changesets
   remote: adding manifests
   remote: adding file changes
-  remote: added 1 changesets with 1 changes to 1 files
   remote: hook failure!
   remote: transaction abort!
   remote: rollback completed
--- a/tests/test-ssh-proto-unbundle.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-ssh-proto-unbundle.t   Mon Sep 09 17:26:17 2019 -0400
@@ -272,11 +272,10 @@
   o> read(1) -> 1:
   o>     0
   result: 0
   remote output:
-  e> read(-1) -> 196:
+  e> read(-1) -> 151:
   e>     adding changesets\n
   e>     adding manifests\n
   e>     adding file changes\n
-  e>     added 1 changesets with 1 changes to 1 files\n
   e>     ui.write 1 line\n
   e>     transaction abort!\n
   e>     rollback completed\n
@@ -328,11 +327,10 @@
   o> read(1) -> 1:
   o>     0
   result: 0
   remote output:
-  e> read(-1) -> 196:
+  e> read(-1) -> 151:
   e>     adding changesets\n
   e>     adding manifests\n
   e>     adding file changes\n
-  e>     added 1 changesets with 1 changes to 1 files\n
   e>     ui.write 1 line\n
   e>     transaction abort!\n
   e>     rollback completed\n
@@ -398,11 +396,10 @@
   o> read(1) -> 1:
   o>     0
   result: 0
   remote output:
-  e> read(-1) -> 218:
+  e> read(-1) -> 173:
   e>     adding changesets\n
   e>     adding manifests\n
   e>     adding file changes\n
-  e>     added 1 changesets with 1 changes to 1 files\n
   e>     ui.write 2 lines 1\n
   e>     ui.write 2 lines 2\n
   e>     transaction abort!\n
@@ -455,11 +452,10 @@
   o> read(1) -> 1:
   o>     0
   result: 0
   remote output:
-  e> read(-1) -> 218:
+  e> read(-1) -> 173:
   e>     adding changesets\n
   e>     adding manifests\n
   e>     adding file changes\n
-  e>     added 1 changesets with 1 changes to 1 files\n
   e>     ui.write 2 lines 1\n
   e>     ui.write 2 lines 2\n
   e>     transaction abort!\n
@@ -526,11 +522,10 @@
   o> read(1) -> 1:
   o>     0
   result: 0
   remote output:
-  e> read(-1) -> 202:
+  e> read(-1) -> 157:
   e>     adding changesets\n
   e>     adding manifests\n
   e>     adding file changes\n
-  e>     added 1 changesets with 1 changes to 1 files\n
   e>     ui.write 1 line flush\n
   e>     transaction abort!\n
   e>     rollback completed\n
@@ -582,11 +577,10 @@
   o> read(1) -> 1:
   o>     0
   result: 0
   remote output:
-  e> read(-1) -> 202:
+  e> read(-1) -> 157:
   e>     adding changesets\n
   e>     adding manifests\n
   e>     adding file changes\n
-  e>     added 1 changesets with 1 changes to 1 files\n
   e>     ui.write 1 line flush\n
   e>     transaction abort!\n
   e>     rollback completed\n
@@ -652,11 +646,10 @@
   o> read(1) -> 1:
   o>     0
   result: 0
   remote output:
-  e> read(-1) -> 206:
+  e> read(-1) -> 161:
   e>     adding changesets\n
   e>     adding manifests\n
   e>     adding file changes\n
-  e>     added 1 changesets with 1 changes to 1 files\n
   e>     ui.write 1st\n
   e>     ui.write 2nd\n
   e>     transaction abort!\n
@@ -709,11 +702,10 @@
   o> read(1) -> 1:
   o>     0
   result: 0
   remote output:
-  e> read(-1) -> 206:
+  e> read(-1) -> 161:
   e>     adding changesets\n
   e>     adding manifests\n
   e>     adding file changes\n
-  e>     added 1 changesets with 1 changes to 1 files\n
   e>     ui.write 1st\n
   e>     ui.write 2nd\n
   e>     transaction abort!\n
@@ -780,11 +772,10 @@
   o> read(1) -> 1:
   o>     0
   result: 0
   remote output:
-  e> read(-1) -> 232:
+  e> read(-1) -> 187:
   e>     adding changesets\n
   e>     adding manifests\n
   e>     adding file changes\n
-  e>     added 1 changesets with 1 changes to 1 files\n
   e>     ui.write 1\n
   e>     ui.write_err 1\n
   e>     ui.write 2\n
@@ -839,11 +830,10 @@
   o> read(1) -> 1:
   o>     0
   result: 0
   remote output:
-  e> read(-1) -> 232:
+  e> read(-1) -> 187:
   e>     adding changesets\n
   e>     adding manifests\n
   e>     adding file changes\n
-  e>     added 1 changesets with 1 changes to 1 files\n
   e>     ui.write 1\n
   e>     ui.write_err 1\n
   e>     ui.write 2\n
@@ -912,11 +902,10 @@
   o> read(1) -> 1:
   o>     0
   result: 0
   remote output:
-  e> read(-1) -> 193:
+  e> read(-1) -> 148:
   e>     adding changesets\n
   e>     adding manifests\n
   e>     adding file changes\n
-  e>     added 1 changesets with 1 changes to 1 files\n
   e>     printed line\n
   e>     transaction abort!\n
   e>     rollback completed\n
@@ -968,11 +957,10 @@
   o> read(1) -> 1:
   o>     0
   result: 0
   remote output:
-  e> read(-1) -> 193:
+  e> read(-1) -> 148:
   e>     adding changesets\n
   e>     adding manifests\n
   e>     adding file changes\n
-  e>     added 1 changesets with 1 changes to 1 files\n
   e>     printed line\n
   e>     transaction abort!\n
   e>     rollback completed\n
@@ -1038,11 +1026,10 @@
   o> read(1) -> 1:
   o>     0
   result: 0
   remote output:
-  e> read(-1) -> 218:
+  e> read(-1) -> 173:
   e>     adding changesets\n
   e>     adding manifests\n
   e>     adding file changes\n
-  e>     added 1 changesets with 1 changes to 1 files\n
   e>     print 1\n
   e>     ui.write 1\n
   e>     print 2\n
@@ -1097,11 +1084,10 @@
   o> read(1) -> 1:
   o>     0
   result: 0
   remote output:
-  e> read(-1) -> 218:
+  e> read(-1) -> 173:
   e>     adding changesets\n
   e>     adding manifests\n
   e>     adding file changes\n
-  e>     added 1 changesets with 1 changes to 1 files\n
   e>     print 1\n
   e>     ui.write 1\n
   e>     print 2\n
@@ -1170,11 +1156,10 @@
   o> read(1) -> 1:
   o>     0
   result: 0
   remote output:
-  e> read(-1) -> 216:
+  e> read(-1) -> 171:
   e>     adding changesets\n
   e>     adding manifests\n
   e>     adding file changes\n
-  e>     added 1 changesets with 1 changes to 1 files\n
   e>     stdout 1\n
   e>     stderr 1\n
   e>     stdout 2\n
@@ -1229,11 +1214,10 @@
   o> read(1) -> 1:
   o>     0
   result: 0
   remote output:
-  e> read(-1) -> 216:
+  e> read(-1) -> 171:
   e>     adding changesets\n
   e>     adding manifests\n
   e>     adding file changes\n
-  e>     added 1 changesets with 1 changes to 1 files\n
   e>     stdout 1\n
   e>     stderr 1\n
   e>     stdout 2\n
@@ -1308,11 +1292,10 @@
   o> read(1) -> 1:
   o>     0
   result: 0
   remote output:
-  e> read(-1) -> 212:
+  e> read(-1) -> 167:
   e>     adding changesets\n
   e>     adding manifests\n
   e>     adding file changes\n
-  e>     added 1 changesets with 1 changes to 1 files\n
   e>     stdout 1\n
   e>     stdout 2\n
   e>     transaction abort!\n
@@ -1365,11 +1348,10 @@
   o> read(1) -> 1:
   o>     0
   result: 0
   remote output:
-  e> read(-1) -> 212:
+  e> read(-1) -> 167:
   e>     adding changesets\n
   e>     adding manifests\n
   e>     adding file changes\n
-  e>     added 1 changesets with 1 changes to 1 files\n
   e>     stdout 1\n
   e>     stdout 2\n
   e>     transaction abort!\n
@@ -1437,11 +1419,10 @@
   o> read(1) -> 1:
   o>     0
   result: 0
   remote output:
-  e> read(-1) -> 212:
+  e> read(-1) -> 167:
   e>     adding changesets\n
   e>     adding manifests\n
   e>     adding file changes\n
-  e>     added 1 changesets with 1 changes to 1 files\n
   e>     stderr 1\n
   e>     stderr 2\n
   e>     transaction abort!\n
@@ -1494,11 +1475,10 @@
   o> read(1) -> 1:
   o>     0
   result: 0
   remote output:
-  e> read(-1) -> 212:
+  e> read(-1) -> 167:
   e>     adding changesets\n
   e>     adding manifests\n
   e>     adding file changes\n
-  e>     added 1 changesets with 1 changes to 1 files\n
   e>     stderr 1\n
   e>     stderr 2\n
   e>     transaction abort!\n
@@ -1568,11 +1548,10 @@
   o> read(1) -> 1:
   o>     0
   result: 0
   remote output:
-  e> read(-1) -> 230:
+  e> read(-1) -> 185:
   e>     adding changesets\n
   e>     adding manifests\n
   e>     adding file changes\n
-  e>     added 1 changesets with 1 changes to 1 files\n
   e>     stdout 1\n
   e>     stderr 1\n
   e>     stdout 2\n
@@ -1627,11 +1606,10 @@
   o> read(1) -> 1:
   o>     0
   result: 0
   remote output:
-  e> read(-1) -> 230:
+  e> read(-1) -> 185:
   e>     adding changesets\n
   e>     adding manifests\n
   e>     adding file changes\n
-  e>     added 1 changesets with 1 changes to 1 files\n
   e>     stdout 1\n
   e>     stderr 1\n
   e>     stdout 2\n
@@ -1709,11 +1687,10 @@
   o> read(1) -> 1:
   o>     0
   result: 0
   remote output:
-  e> read(-1) -> 273:
+  e> read(-1) -> 228:
   e>     adding changesets\n
   e>     adding manifests\n
   e>     adding file changes\n
-  e>     added 1 changesets with 1 changes to 1 files\n
   e>     shell stdout 1\n
   e>     shell stderr 1\n
   e>     shell stdout 2\n
@@ -1772,11 +1749,10 @@
   o> read(1) -> 1:
   o>     0
   result: 0
   remote output:
-  e> read(-1) -> 273:
+  e> read(-1) -> 228:
   e>     adding changesets\n
   e>     adding manifests\n
   e>     adding file changes\n
-  e>     added 1 changesets with 1 changes to 1 files\n
   e>     shell stdout 1\n
   e>     shell stderr 1\n
   e>     shell stdout 2\n
@@ -1983,11 +1959,11 @@
   e>     adding changesets\n
   e>     adding manifests\n
   e>     adding file changes\n
-  e>     added 1 changesets with 1 changes to 1 files\n
   e>     ui.write 1\n
   e>     ui.write_err 1\n
   e>     ui.write 2\n
   e>     ui.write_err 2\n
+  e>     added 1 changesets with 1 changes to 1 files\n

 testing ssh2
 creating ssh peer from handshake results
@@ -2039,8 +2015,8 @@
   e>     adding changesets\n
   e>     adding manifests\n
   e>     adding file changes\n
-  e>     added 1 changesets with 1 changes to 1 files\n
   e>     ui.write 1\n
   e>     ui.write_err 1\n
   e>     ui.write 2\n
   e>     ui.write_err 2\n
+  e>     added 1 changesets with 1 changes to 1 files\n
--- a/tests/test-ssh-proto.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-ssh-proto.t   Mon Sep 09 17:26:17 2019 -0400
@@ -104,6 +104,7 @@
   $ hg debugserve --sshstdio --logiofd 1 << EOF
   > hello
   > EOF
+  e> flush() -> None
   o> write(4) -> 4:
   o>     440\n
   o> write(440) -> 440:
@@ -119,6 +120,7 @@
   capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   $ cat $TESTTMP/io
+  e> flush() -> None
   o> write(4) -> 4:
   o>     440\n
   o> write(440) -> 440:
--- a/tests/test-ssh.t   Sat Sep 07 14:35:21 2019 +0100
+++ b/tests/test-ssh.t   Mon Sep 09 17:26:17 2019 -0400
@@ -644,7 +644,6 @@
   remote: adding changesets
   remote: adding manifests
   remote: adding file changes
-  remote: added 1 changesets with 1 changes to 1 files
   remote: hook failure!
   remote: transaction abort!
   remote: rollback completed
--- a/tests/test-strip-cross.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-strip-cross.t Mon Sep 09 17:26:17 2019 -0400 @@ -2,14 +2,13 @@ $ echo '[extensions]' >> $HGRCPATH $ echo 'strip =' >> $HGRCPATH - $ hg init orig - $ cd orig $ commit() > { > hg up -qC null > count=1 > for i in "$@"; do > for f in $i; do + > mkdir -p `dirname $f` > echo $count > $f > done > count=`expr $count + 1` @@ -19,29 +18,22 @@ 2 1 0 2 0 1 2 + $ mkdir files + $ cd files + $ hg init orig + $ cd orig $ commit '201 210' $ commit '102 120' '210' $ commit '021' $ commit '201' '021 120' $ commit '012 021' '102 201' '120 210' - $ commit 'manifest-file' $ commit '102 120' '012 210' '021 201' $ commit '201 210' '021 120' '012 102' - $ HGUSER=another-user; export HGUSER - $ commit 'manifest-file' - $ commit '012' 'manifest-file' $ cd .. - $ hg clone -q -U -r -1 -r -2 -r -3 -r -4 -r -6 orig crossed + $ hg clone -q -U -r 4 -r 5 -r 6 orig crossed $ cd crossed - $ hg debugindex --manifest - rev linkrev nodeid p1 p2 - 0 0 6f105cbb914d 000000000000 000000000000 - 1 3 1b55917b3699 000000000000 000000000000 - 2 1 8f3d04e263e5 000000000000 000000000000 - 3 2 f0ef8726ac4f 000000000000 000000000000 - 4 4 0b76e38b4070 000000000000 000000000000 - $ for i in 012 021 102 120 201 210 manifest-file; do + $ for i in 012 021 102 120 201 210; do > echo $i > hg debugindex $i > echo @@ -82,13 +74,8 @@ 1 1 5d9299349fc0 000000000000 000000000000 2 0 2661d26c6496 000000000000 000000000000 - manifest-file - rev linkrev nodeid p1 p2 - 0 3 b8e02f643373 000000000000 000000000000 - 1 4 5d9299349fc0 000000000000 000000000000 - $ cd .. - $ for i in 0 1 2 3 4; do + $ for i in 0 1 2; do > hg clone -q -U --pull crossed $i > echo "% Trying to strip revision $i" > hg --cwd $i strip $i @@ -97,47 +84,137 @@ > echo > done % Trying to strip revision 0 - saved backup bundle to $TESTTMP/0/.hg/strip-backup/*-backup.hg (glob) + saved backup bundle to $TESTTMP/files/0/.hg/strip-backup/cbb8c2f0a2e3-239800b9-backup.hg + % Verifying + checking changesets + checking manifests + crosschecking files in changesets and manifests + checking files + checked 2 changesets with 12 changes to 6 files + + % Trying to strip revision 1 + saved backup bundle to $TESTTMP/files/1/.hg/strip-backup/124ecc0cbec9-6104543f-backup.hg + % Verifying + checking changesets + checking manifests + crosschecking files in changesets and manifests + checking files + checked 2 changesets with 12 changes to 6 files + + % Trying to strip revision 2 + saved backup bundle to $TESTTMP/files/2/.hg/strip-backup/f6439b304a1a-c6505a5f-backup.hg % Verifying checking changesets checking manifests crosschecking files in changesets and manifests checking files - checked 4 changesets with 15 changes to 7 files + checked 2 changesets with 12 changes to 6 files - % Trying to strip revision 1 - saved backup bundle to $TESTTMP/1/.hg/strip-backup/*-backup.hg (glob) + $ cd .. + +Do a similar test where the manifest revlog has unordered linkrevs + $ mkdir manifests + $ cd manifests + $ hg init orig + $ cd orig + $ commit 'file' + $ commit 'other' + $ commit '' 'other' + $ HGUSER=another-user; export HGUSER + $ commit 'file' + $ commit 'other' 'file' + $ cd .. + $ hg clone -q -U -r 1 -r 2 -r 3 -r 4 orig crossed + $ cd crossed + $ hg debugindex --manifest + rev linkrev nodeid p1 p2 + 0 2 6bbc6fee55c2 000000000000 000000000000 + 1 0 1c556153fe54 000000000000 000000000000 + 2 1 1f76dba919fd 000000000000 000000000000 + 3 3 bbee06ad59d5 000000000000 000000000000 + + $ cd .. 
+ $ for i in 2 3; do + > hg clone -q -U --pull crossed $i + > echo "% Trying to strip revision $i" + > hg --cwd $i strip $i + > echo "% Verifying" + > hg --cwd $i verify + > echo + > done + % Trying to strip revision 2 + saved backup bundle to $TESTTMP/manifests/2/.hg/strip-backup/f3015ad03c03-4d98bdc2-backup.hg % Verifying checking changesets checking manifests crosschecking files in changesets and manifests checking files - checked 4 changesets with 14 changes to 7 files + checked 3 changesets with 3 changes to 2 files - % Trying to strip revision 2 - saved backup bundle to $TESTTMP/2/.hg/strip-backup/*-backup.hg (glob) + % Trying to strip revision 3 + saved backup bundle to $TESTTMP/manifests/3/.hg/strip-backup/9632aa303aa4-69192e3f-backup.hg % Verifying checking changesets checking manifests crosschecking files in changesets and manifests checking files - checked 4 changesets with 14 changes to 7 files + checked 3 changesets with 3 changes to 2 files - % Trying to strip revision 3 - saved backup bundle to $TESTTMP/3/.hg/strip-backup/*-backup.hg (glob) + $ cd .. + +Now a similar test for a non-root manifest revlog + $ cat >> $HGRCPATH <<EOF + > [experimental] + > treemanifests = yes + > EOF + $ mkdir treemanifests + $ cd treemanifests + $ + $ hg --config experimental.treemanifest=True init orig + $ cd orig + $ commit 'dir/file' + $ commit 'dir/other' + $ commit '' 'dir/other' + $ HGUSER=yet-another-user; export HGUSER + $ commit 'otherdir dir/file' + $ commit 'otherdir dir/other' 'otherdir dir/file' + $ cd .. + $ hg --config experimental.treemanifest=True clone -q -U -r 1 -r 2 -r 3 -r 4 orig crossed + $ cd crossed + $ hg debugindex --dir dir + rev linkrev nodeid p1 p2 + 0 2 6bbc6fee55c2 000000000000 000000000000 + 1 0 1c556153fe54 000000000000 000000000000 + 2 1 1f76dba919fd 000000000000 000000000000 + 3 3 bbee06ad59d5 000000000000 000000000000 + + $ cd .. + $ for i in 2 3; do + > hg --config experimental.treemanifest=True clone -q -U --pull crossed $i + > echo "% Trying to strip revision $i" + > hg --cwd $i strip $i + > echo "% Verifying" + > hg --cwd $i verify + > echo + > done + % Trying to strip revision 2 + saved backup bundle to $TESTTMP/treemanifests/2/.hg/strip-backup/145f5c75f9ac-a105cfbe-backup.hg % Verifying checking changesets checking manifests + checking directory manifests crosschecking files in changesets and manifests checking files - checked 4 changesets with 19 changes to 7 files + checked 3 changesets with 4 changes to 3 files - % Trying to strip revision 4 - saved backup bundle to $TESTTMP/4/.hg/strip-backup/*-backup.hg (glob) + % Trying to strip revision 3 + saved backup bundle to $TESTTMP/treemanifests/3/.hg/strip-backup/e4e3de5c3cb2-f4c70376-backup.hg % Verifying checking changesets checking manifests + checking directory manifests crosschecking files in changesets and manifests checking files - checked 4 changesets with 19 changes to 7 files + checked 3 changesets with 4 changes to 3 files + $ cd ..
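These tests exercise strip when revlog order and changelog order disagree ("crossed" linkrevs). The invariant strip must restore is that no surviving revlog entry points at a stripped changeset; since a revlog can only be truncated, never spliced, strip finds the earliest entry whose linkrev is going away, truncates from there, and re-adds the collateral revisions from the backup bundle. A rough model of that computation (plain Python, not Mercurial's implementation):

    # Rough model: each revlog is a list of linkrevs (changelog
    # revisions), in revlog order; with crossed linkrevs the list
    # is not sorted.
    def strippoint(linkrevs, striprev):
        """First revlog revision to truncate when stripping changelog
        revisions >= striprev, plus entries lost as collateral because
        revlogs can only be truncated."""
        cut = len(linkrevs)
        for rev, linkrev in enumerate(linkrevs):
            if linkrev >= striprev:
                cut = rev
                break
        # later revisions whose linkrev survives must be saved in the
        # backup bundle and re-added after truncation
        collateral = [lr for lr in linkrevs[cut:] if lr < striprev]
        return cut, collateral

    # manifest revlog with unordered linkrevs, as in the test above
    linkrevs = [2, 0, 1, 3]
    print(strippoint(linkrevs, 2))  # (0, [0, 1]): truncate all, re-add two
    print(strippoint(linkrevs, 3))  # (3, []): only the last entry goes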
--- a/tests/test-tag.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-tag.t Mon Sep 09 17:26:17 2019 -0400 @@ -434,6 +434,7 @@ abeb261f0508ecebcd345ce21e7a25112df417aa (mimic 'hg prune' command by obsoleting current changeset and then moving to its parent) $ hg debugobsolete abeb261f0508ecebcd345ce21e7a25112df417aa --record-parents + 1 new obsolescence markers obsoleted 1 changesets $ hg up ".^" --quiet $ cat .hgtags
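The extra "1 new obsolescence markers" line here (and in several tests below) comes from marker creation now reporting how many markers were actually recorded, on top of the existing "obsoleted N changesets" line. A sketch of that kind of feedback, with invented names (this is not hg's code):

    # Illustrative only; names are invented.
    def record_markers(store, markers):
        new = [m for m in markers if m not in store]
        store.update(new)
        if new:
            print('%d new obsolescence markers' % len(new))
        return new

    store = set()
    record_markers(store, {('abeb261f0508', None)})  # 1 new obsolescence markers
    record_markers(store, {('abeb261f0508', None)})  # duplicate: silent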
--- a/tests/test-template-functions.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-template-functions.t Mon Sep 09 17:26:17 2019 -0400 @@ -918,10 +918,13 @@ 9:c5623987d205cd6d9d8389bfc40fff9dbb670b48 10:c562ddd9c94164376c20b86b0b4991636a3bf84f $ hg debugobsolete a00be79088084cb3aff086ab799f8790e01a976b + 1 new obsolescence markers obsoleted 1 changesets $ hg debugobsolete c5623987d205cd6d9d8389bfc40fff9dbb670b48 + 1 new obsolescence markers obsoleted 1 changesets $ hg debugobsolete c562ddd9c94164376c20b86b0b4991636a3bf84f + 1 new obsolescence markers obsoleted 1 changesets nodes starting with '11' (we don't have the revision number '11' though) @@ -987,6 +990,7 @@ 1:x0 $ hg debugobsolete 0cf177ba2b1dc3862a00fb81715fec90950201be + 1 new obsolescence markers obsoleted 1 changesets $ hg up -q 0 $ echo 61 > a
--- a/tests/test-transplant.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-transplant.t Mon Sep 09 17:26:17 2019 -0400 @@ -1,8 +1,17 @@ +#testcases commandmode continueflag $ cat <<EOF >> $HGRCPATH > [extensions] > transplant= + > graphlog= > EOF +#if continueflag + $ cat >> $HGRCPATH <<EOF + > [alias] + > continue = transplant --continue + > EOF +#endif + $ hg init t $ cd t $ hg transplant @@ -11,6 +20,9 @@ $ hg transplant --continue --all abort: --continue is incompatible with --branch, --all and --merge [255] + $ hg transplant --stop --all + abort: --stop is incompatible with --branch, --all and --merge + [255] $ hg transplant --all tip abort: --all requires a branch revision [255] @@ -350,9 +362,9 @@ adding changesets adding manifests adding file changes - added 1 changesets with 1 changes to 1 files applying a53251cdf717 a53251cdf717 transplanted to 8d9279348abb + added 1 changesets with 1 changes to 1 files $ hg log --template '{rev} {parents} {desc}\n' 2 b3 1 b1 @@ -368,7 +380,8 @@ applying 722f4667af76 722f4667af76 transplanted to 76e321915884 -transplant --continue + +transplant --continue and --stop behaviour $ hg init ../tc $ cd ../tc @@ -408,6 +421,36 @@ $ echo foobar > foo $ hg ci -mfoobar created new head + +Repo log before transplant + $ hg glog + @ changeset: 4:e8643552fde5 + | tag: tip + | parent: 0:493149fa1541 + | user: test + | date: Thu Jan 01 00:00:00 1970 +0000 + | summary: foobar + | + | o changeset: 3:1dab759070cf + | | user: test + | | date: Thu Jan 01 00:00:00 1970 +0000 + | | summary: bar2 + | | + | o changeset: 2:9d6d6b5a8275 + | | user: test + | | date: Thu Jan 01 00:00:00 1970 +0000 + | | summary: bar + | | + | o changeset: 1:46ae92138f3c + |/ user: test + | date: Thu Jan 01 00:00:00 1970 +0000 + | summary: foo2 + | + o changeset: 0:493149fa1541 + user: test + date: Thu Jan 01 00:00:00 1970 +0000 + summary: foo + $ hg transplant 1:3 applying 46ae92138f3c patching file foo @@ -417,6 +460,49 @@ abort: fix up the working directory and run hg transplant --continue [255] + $ hg transplant --stop + stopped the interrupted transplant + working directory is now at e8643552fde5 +Repo log after abort + $ hg glog + @ changeset: 4:e8643552fde5 + | tag: tip + | parent: 0:493149fa1541 + | user: test + | date: Thu Jan 01 00:00:00 1970 +0000 + | summary: foobar + | + | o changeset: 3:1dab759070cf + | | user: test + | | date: Thu Jan 01 00:00:00 1970 +0000 + | | summary: bar2 + | | + | o changeset: 2:9d6d6b5a8275 + | | user: test + | | date: Thu Jan 01 00:00:00 1970 +0000 + | | summary: bar + | | + | o changeset: 1:46ae92138f3c + |/ user: test + | date: Thu Jan 01 00:00:00 1970 +0000 + | summary: foo2 + | + o changeset: 0:493149fa1541 + user: test + date: Thu Jan 01 00:00:00 1970 +0000 + summary: foo + + $ hg transplant 1:3 + applying 46ae92138f3c + file added already exists + 1 out of 1 hunks FAILED -- saving rejects to file added.rej + patching file foo + Hunk #1 FAILED at 0 + 1 out of 1 hunks FAILED -- saving rejects to file foo.rej + patch failed to apply + abort: fix up the working directory and run hg transplant --continue + [255] + transplant -c shouldn't use an old changeset $ hg up -C @@ -424,8 +510,12 @@ updated to "e8643552fde5: foobar" 1 other heads for branch "default" $ rm added - $ hg transplant --continue - abort: no transplant to continue + $ hg continue + abort: no transplant to continue (continueflag !) + abort: no operation in progress (no-continueflag !) 
+ [255] + $ hg transplant --stop + abort: no interrupted transplant found [255] $ hg transplant 1 applying 46ae92138f3c @@ -480,23 +570,23 @@ [255] $ hg transplant 1:3 abort: transplant in progress - (use 'hg transplant --continue' or 'hg update' to abort) + (use 'hg transplant --continue' or 'hg transplant --stop') [255] $ hg status -v A bar + ? added.rej ? baz.rej ? foo.rej # The repository is in an unfinished *transplant* state. # To continue: hg transplant --continue - # To abort: hg update + # To stop: hg transplant --stop $ echo fixed > baz - $ hg transplant --continue + $ hg continue 9d6d6b5a8275 transplanted as d80c49962290 applying 1dab759070cf 1dab759070cf transplanted to aa0ffe6bd5ae - $ cd .. Issue1111: Test transplant --merge @@ -564,9 +654,9 @@ adding changesets adding manifests adding file changes - added 2 changesets with 2 changes to 2 files applying a53251cdf717 4:a53251cdf717 merged at 4831f4dc831a + added 2 changesets with 2 changes to 2 files test interactive transplant @@ -881,7 +971,7 @@ [255] $ hg status ? b.rej - $ hg transplant --continue + $ hg continue 645035761929 skipped due to empty diff $ cd ..
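The new `--stop` flag gives transplant a first-class way to abandon an interrupted run, replacing the old advice to `hg update`. Conceptually it clears the saved series state and puts the working directory back where the transplant started, without touching anything already applied; the `working directory is now at e8643552fde5` line in the test shows exactly that. A simplified sketch with invented helper names (not the extension's code):

    # Simplified sketch of "stop" handling for an interrupted
    # operation; helper names are invented for illustration.
    import os

    def stop_transplant(statefile, checkout):
        if not os.path.exists(statefile):
            raise SystemExit('abort: no interrupted transplant found')
        with open(statefile) as fh:
            startnode = fh.readline().strip()  # node we started from
        os.unlink(statefile)                   # forget the pending series
        checkout(startnode)                    # restore working directory
        print('stopped the interrupted transplant')
        print('working directory is now at %s' % startnode[:12])

The `continueflag` testcase additionally aliases `hg continue` to `transplant --continue`, which is why the expected abort message differs between the two testcase variants.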
--- a/tests/test-uncommit.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-uncommit.t Mon Sep 09 17:26:17 2019 -0400 @@ -38,6 +38,12 @@ --allow-dirty-working-copy allow uncommit with outstanding changes -I --include PATTERN [+] include names matching the given patterns -X --exclude PATTERN [+] exclude names matching the given patterns + -m --message TEXT use text as commit message + -l --logfile FILE read commit message from file + -d --date DATE record the specified date as commit date + -u --user USER record the specified user as committer + -D --current-date record the current date as commit date + -U --current-user record the current user as committer (some details hidden, use --verbose to show complete help) @@ -531,9 +537,18 @@ $ mkdir dir $ echo 1 > dir/file.txt $ hg ci -Aqm 'add file in directory' - $ hg uncommit dir + $ hg uncommit dir -m 'uncommit with message' -u 'different user' \ + > -d 'Jun 30 12:12:12 1980 +0000' $ hg status A dir/file.txt + $ hg log -r . + changeset: 8:b4dd26dc42e0 + tag: tip + parent: 6:2278a4c24330 + user: different user + date: Mon Jun 30 12:12:12 1980 +0000 + summary: uncommit with message + `uncommit <dir>` and `cd <dir> && uncommit .` behave the same...
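`hg uncommit` grew the standard commit-metadata options (-m/-l/-d/-u/-D/-U), so the replacement commit it leaves behind can carry a new message, user, or date, as the test demonstrates. The underlying pattern is just "new value if given, otherwise inherit from the old changeset"; roughly:

    # Rough sketch of metadata fallback when rewriting a changeset;
    # 'old' stands in for the changeset being uncommitted.
    def newmeta(opts, old):
        return {
            'text': opts.get('message') or old['description'],
            'user': opts.get('user') or old['user'],
            'date': opts.get('date') or old['date'],
        }

    old = {'description': 'add file in directory',
           'user': 'test',
           'date': 'Thu Jan 01 00:00:00 1970 +0000'}
    opts = {'message': 'uncommit with message',
            'user': 'different user',
            'date': 'Jun 30 12:12:12 1980 +0000'}
    print(newmeta(opts, old))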
--- a/tests/test-update-branches.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-update-branches.t Mon Sep 09 17:26:17 2019 -0400 @@ -502,8 +502,10 @@ $ hg id --debug -i -r 4 d047485b3896813b2a624e86201983520f003206 $ hg debugobsolete 6efa171f091b00a3c35edc15d48c52a498929953 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa + 1 new obsolescence markers obsoleted 1 changesets $ hg debugobsolete aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa d047485b3896813b2a624e86201983520f003206 + 1 new obsolescence markers Test that 5 is detected as a valid destination from 3 and also accepts moving the bookmark (issue4015) @@ -575,6 +577,7 @@ $ hg up --quiet 0 $ hg up --quiet 2 $ hg debugobsolete bd10386d478cd5a9faf2e604114c8e6da62d3889 + 1 new obsolescence markers obsoleted 1 changesets $ hg up 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
--- a/tests/test-upgrade-repo.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-upgrade-repo.t Mon Sep 09 17:26:17 2019 -0400 @@ -518,9 +518,321 @@ removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob) $ ls -1 .hg/ | grep upgradebackup [1] + +We can restrict optimization to some revlog: + + $ hg debugupgrade --optimize re-delta-parent --run --manifest --no-backup --debug --traceback + upgrade will perform the following actions: + + requirements + preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store + + re-delta-parent + deltas within internal storage will choose a new base revision if needed + + beginning upgrade... + repository locked and read-only + creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob) + (it is safe to interrupt this process any time before data migration completes) + migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog) + migrating 917 bytes in store; 401 bytes tracked data + migrating 3 filelogs containing 3 revisions (192 bytes in store; 0 bytes tracked data) + blindly copying data/f0.i containing 1 revisions + blindly copying data/f1.i containing 1 revisions + blindly copying data/f2.i containing 1 revisions + finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes + migrating 1 manifests containing 3 revisions (349 bytes in store; 220 bytes tracked data) + cloning 3 revisions from 00manifest.i + finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes + migrating changelog containing 3 revisions (376 bytes in store; 181 bytes tracked data) + blindly copying 00changelog.i containing 3 revisions + finished migrating 3 changelog revisions; change in size: 0 bytes + finished migrating 9 total revisions; total change in store size: 0 bytes + copying phaseroots + data fully migrated to temporary repository + marking source repository as being upgraded; clients will be unable to read from repository + starting in-place swap of repository data + replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob) + replacing store... + store replacement complete; repository was inconsistent for *s (glob) + finalizing requirements file and making repository readable again + removing old repository content$TESTTMP/upgradegd/.hg/upgradebackup.* (glob) + removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob) + +Check that the repo still works fine + + $ hg log -G --patch + @ changeset: 2:b5a3b78015e5 + | tag: tip + | parent: 0:ba592bf28da2 + | user: test + | date: Thu Jan 01 00:00:00 1970 +0000 + | summary: add f2 + | + | + | o changeset: 1:da8c0fc4833c + |/ user: test + | date: Thu Jan 01 00:00:00 1970 +0000 + | summary: add f1 + | + | + o changeset: 0:ba592bf28da2 + user: test + date: Thu Jan 01 00:00:00 1970 +0000 + summary: initial + + + + $ hg verify + checking changesets + checking manifests + crosschecking files in changesets and manifests + checking files + checked 3 changesets with 3 changes to 3 files + +Check we can select negatively + + $ hg debugupgrade --optimize re-delta-parent --run --no-manifest --no-backup --debug --traceback + upgrade will perform the following actions: + + requirements + preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store + + re-delta-parent + deltas within internal storage will choose a new base revision if needed + + beginning upgrade... 
+ repository locked and read-only + creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob) + (it is safe to interrupt this process any time before data migration completes) + migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog) + migrating 917 bytes in store; 401 bytes tracked data + migrating 3 filelogs containing 3 revisions (192 bytes in store; 0 bytes tracked data) + cloning 1 revisions from data/f0.i + cloning 1 revisions from data/f1.i + cloning 1 revisions from data/f2.i + finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes + migrating 1 manifests containing 3 revisions (349 bytes in store; 220 bytes tracked data) + blindly copying 00manifest.i containing 3 revisions + finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes + migrating changelog containing 3 revisions (376 bytes in store; 181 bytes tracked data) + cloning 3 revisions from 00changelog.i + finished migrating 3 changelog revisions; change in size: 0 bytes + finished migrating 9 total revisions; total change in store size: 0 bytes + copying phaseroots + data fully migrated to temporary repository + marking source repository as being upgraded; clients will be unable to read from repository + starting in-place swap of repository data + replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob) + replacing store... + store replacement complete; repository was inconsistent for *s (glob) + finalizing requirements file and making repository readable again + removing old repository content$TESTTMP/upgradegd/.hg/upgradebackup.* (glob) + removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob) + $ hg verify + checking changesets + checking manifests + crosschecking files in changesets and manifests + checking files + checked 3 changesets with 3 changes to 3 files + +Check that we can select changelog only + + $ hg debugupgrade --optimize re-delta-parent --run --changelog --no-backup --debug --traceback + upgrade will perform the following actions: + + requirements + preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store + + re-delta-parent + deltas within internal storage will choose a new base revision if needed + + beginning upgrade... 
+ repository locked and read-only + creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob) + (it is safe to interrupt this process any time before data migration completes) + migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog) + migrating 917 bytes in store; 401 bytes tracked data + migrating 3 filelogs containing 3 revisions (192 bytes in store; 0 bytes tracked data) + blindly copying data/f0.i containing 1 revisions + blindly copying data/f1.i containing 1 revisions + blindly copying data/f2.i containing 1 revisions + finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes + migrating 1 manifests containing 3 revisions (349 bytes in store; 220 bytes tracked data) + blindly copying 00manifest.i containing 3 revisions + finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes + migrating changelog containing 3 revisions (376 bytes in store; 181 bytes tracked data) + cloning 3 revisions from 00changelog.i + finished migrating 3 changelog revisions; change in size: 0 bytes + finished migrating 9 total revisions; total change in store size: 0 bytes + copying phaseroots + data fully migrated to temporary repository + marking source repository as being upgraded; clients will be unable to read from repository + starting in-place swap of repository data + replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob) + replacing store... + store replacement complete; repository was inconsistent for *s (glob) + finalizing requirements file and making repository readable again + removing old repository content$TESTTMP/upgradegd/.hg/upgradebackup.* (glob) + removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob) + $ hg verify + checking changesets + checking manifests + crosschecking files in changesets and manifests + checking files + checked 3 changesets with 3 changes to 3 files + +Check that we can select filelog only + + $ hg debugupgrade --optimize re-delta-parent --run --no-changelog --no-manifest --no-backup --debug --traceback + upgrade will perform the following actions: + + requirements + preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store + + re-delta-parent + deltas within internal storage will choose a new base revision if needed + + beginning upgrade... 
+ repository locked and read-only + creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob) + (it is safe to interrupt this process any time before data migration completes) + migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog) + migrating 917 bytes in store; 401 bytes tracked data + migrating 3 filelogs containing 3 revisions (192 bytes in store; 0 bytes tracked data) + cloning 1 revisions from data/f0.i + cloning 1 revisions from data/f1.i + cloning 1 revisions from data/f2.i + finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes + migrating 1 manifests containing 3 revisions (349 bytes in store; 220 bytes tracked data) + blindly copying 00manifest.i containing 3 revisions + finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes + migrating changelog containing 3 revisions (376 bytes in store; 181 bytes tracked data) + blindly copying 00changelog.i containing 3 revisions + finished migrating 3 changelog revisions; change in size: 0 bytes + finished migrating 9 total revisions; total change in store size: 0 bytes + copying phaseroots + data fully migrated to temporary repository + marking source repository as being upgraded; clients will be unable to read from repository + starting in-place swap of repository data + replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob) + replacing store... + store replacement complete; repository was inconsistent for *s (glob) + finalizing requirements file and making repository readable again + removing old repository content$TESTTMP/upgradegd/.hg/upgradebackup.* (glob) + removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob) + $ hg verify + checking changesets + checking manifests + crosschecking files in changesets and manifests + checking files + checked 3 changesets with 3 changes to 3 files + + +Check you can't skip revlog clone during important format downgrade + + $ echo "[format]" > .hg/hgrc + $ echo "sparse-revlog=no" >> .hg/hgrc + $ hg debugupgrade --optimize re-delta-parent --run --manifest --no-backup --debug --traceback + ignoring revlogs selection flags, format requirements change: sparserevlog + upgrade will perform the following actions: + + requirements + preserved: dotencode, fncache, generaldelta, revlogv1, store + removed: sparserevlog + + re-delta-parent + deltas within internal storage will choose a new base revision if needed + + beginning upgrade... 
+ repository locked and read-only + creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob) + (it is safe to interrupt this process any time before data migration completes) + migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog) + migrating 917 bytes in store; 401 bytes tracked data + migrating 3 filelogs containing 3 revisions (192 bytes in store; 0 bytes tracked data) + cloning 1 revisions from data/f0.i + cloning 1 revisions from data/f1.i + cloning 1 revisions from data/f2.i + finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes + migrating 1 manifests containing 3 revisions (349 bytes in store; 220 bytes tracked data) + cloning 3 revisions from 00manifest.i + finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes + migrating changelog containing 3 revisions (376 bytes in store; 181 bytes tracked data) + cloning 3 revisions from 00changelog.i + finished migrating 3 changelog revisions; change in size: 0 bytes + finished migrating 9 total revisions; total change in store size: 0 bytes + copying phaseroots + data fully migrated to temporary repository + marking source repository as being upgraded; clients will be unable to read from repository + starting in-place swap of repository data + replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob) + replacing store... + store replacement complete; repository was inconsistent for *s (glob) + finalizing requirements file and making repository readable again + removing old repository content$TESTTMP/upgradegd/.hg/upgradebackup.* (glob) + removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob) + $ hg verify + checking changesets + checking manifests + crosschecking files in changesets and manifests + checking files + checked 3 changesets with 3 changes to 3 files + +Check you can't skip revlog clone during important format upgrade + + $ echo "sparse-revlog=yes" >> .hg/hgrc + $ hg debugupgrade --optimize re-delta-parent --run --manifest --no-backup --debug --traceback + ignoring revlogs selection flags, format requirements change: sparserevlog + upgrade will perform the following actions: + + requirements + preserved: dotencode, fncache, generaldelta, revlogv1, store + added: sparserevlog + + sparserevlog + Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server. + + re-delta-parent + deltas within internal storage will choose a new base revision if needed + + beginning upgrade... 
+ repository locked and read-only + creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob) + (it is safe to interrupt this process any time before data migration completes) + migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog) + migrating 917 bytes in store; 401 bytes tracked data + migrating 3 filelogs containing 3 revisions (192 bytes in store; 0 bytes tracked data) + cloning 1 revisions from data/f0.i + cloning 1 revisions from data/f1.i + cloning 1 revisions from data/f2.i + finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes + migrating 1 manifests containing 3 revisions (349 bytes in store; 220 bytes tracked data) + cloning 3 revisions from 00manifest.i + finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes + migrating changelog containing 3 revisions (376 bytes in store; 181 bytes tracked data) + cloning 3 revisions from 00changelog.i + finished migrating 3 changelog revisions; change in size: 0 bytes + finished migrating 9 total revisions; total change in store size: 0 bytes + copying phaseroots + data fully migrated to temporary repository + marking source repository as being upgraded; clients will be unable to read from repository + starting in-place swap of repository data + replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob) + replacing store... + store replacement complete; repository was inconsistent for *s (glob) + finalizing requirements file and making repository readable again + removing old repository content$TESTTMP/upgradegd/.hg/upgradebackup.* (glob) + removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob) + $ hg verify + checking changesets + checking manifests + crosschecking files in changesets and manifests + checking files + checked 3 changesets with 3 changes to 3 files + $ cd .. - store files with special filenames aren't encoded during copy $ hg init store-filenames
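The runs above show the new revlog-selection flags for `hg debugupgraderepo`: a selected revlog is re-deltified ("cloning N revisions from ..."), unselected ones are copied verbatim ("blindly copying ..."), and the selection is ignored outright when the upgrade changes format requirements such as sparserevlog, since every revlog must then be rewritten. The decision boils down to something like this (schematic, with invented names):

    # Schematic decision logic; names are invented, not hg's internals.
    def plan(revlogs, selected, requirements_changed):
        if requirements_changed and selected != set(revlogs):
            print('ignoring revlogs selection flags, '
                  'format requirements change')
            selected = set(revlogs)  # every revlog must be rewritten
        for rl in revlogs:
            if rl in selected:
                print('cloning revisions from %s' % rl)  # re-delta
            else:
                print('blindly copying %s' % rl)         # byte copy

    revlogs = ['00changelog.i', '00manifest.i', 'data/f0.i']
    plan(revlogs, selected={'00manifest.i'}, requirements_changed=False)
    plan(revlogs, selected={'00manifest.i'}, requirements_changed=True)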
--- a/tests/test-walk.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-walk.t Mon Sep 09 17:26:17 2019 -0400 @@ -100,7 +100,7 @@ f mammals/skunk skunk $ hg debugwalk -v -I 'relglob:*k' * matcher: - <includematcher includes='(?:|.*/)[^/]*k(?:/|$)'> + <includematcher includes='.*k(?:/|$)'> f beans/black ../beans/black f fenugreek ../fenugreek f mammals/skunk skunk @@ -108,7 +108,7 @@ * matcher: <intersectionmatcher m1=<patternmatcher patterns='mammals(?:/|$)'>, - m2=<includematcher includes='(?:|.*/)[^/]*k(?:/|$)'>> + m2=<includematcher includes='.*k(?:/|$)'>> f mammals/skunk skunk $ hg debugwalk -v -I 're:.*k$' * matcher:
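The matcher change replaces `(?:|.*/)[^/]*k(?:/|$)` with the shorter `.*k(?:/|$)`. The two are equivalent when matched from the start of a path: a `k` that ends a path component is exactly a `k` followed by `/` or end-of-string, so the explicit "optional directories plus slash-free component" prefix adds nothing. A quick check:

    import re

    OLD = re.compile(r'(?:|.*/)[^/]*k(?:/|$)')
    NEW = re.compile(r'.*k(?:/|$)')

    paths = ['beans/black', 'fenugreek', 'mammals/skunk',
             'skunk/nested', 'back/forth', 'mammals/Procyonidae']
    for p in paths:
        assert bool(OLD.match(p)) == bool(NEW.match(p))
        print('%-22s %s' % (p, bool(NEW.match(p))))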
--- a/tests/test-win32text.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-win32text.t Mon Sep 09 17:26:17 2019 -0400 @@ -56,7 +56,6 @@ adding changesets adding manifests adding file changes - added 2 changesets with 2 changes to 2 files attempt to commit or push text file(s) using CRLF line endings in bc2d09796734: g in b1aa5cde7ff4: f @@ -265,7 +264,6 @@ adding changesets adding manifests adding file changes - added 3 changesets with 4 changes to 4 files attempt to commit or push text file(s) using CRLF line endings in 67ac5962ab43: d in 68c127d1834e: b
--- a/tests/test-wireproto-exchangev2-shallow.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-wireproto-exchangev2-shallow.t Mon Sep 09 17:26:17 2019 -0400 @@ -490,7 +490,7 @@ received frame(size=2; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation) received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos) searching for changes - all local heads known remotely + all local changesets known remotely sending 1 commands sending command changesetdata: { 'fields': set([
--- a/tests/test-wireproto-exchangev2.t Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/test-wireproto-exchangev2.t Mon Sep 09 17:26:17 2019 -0400 @@ -299,7 +299,7 @@ ] } searching for changes - all local heads known remotely + all local changesets known remotely sending 1 commands sending command changesetdata: { 'fields': set([
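The reworded message in both exchangev2 tests describes what discovery actually established: every local changeset, not merely the current heads, is already known to the remote, so there is nothing to send. Schematically:

    # Schematic: report the stronger property discovery established.
    def report(local_changesets, remote_known):
        missing = [c for c in local_changesets if c not in remote_known]
        if not missing:
            print('all local changesets known remotely')
        return missing

    report({'a', 'b', 'c'}, remote_known={'a', 'b', 'c', 'd'})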
--- a/tests/wireprotosimplecache.py Sat Sep 07 14:35:21 2019 +0100 +++ b/tests/wireprotosimplecache.py Mon Sep 09 17:26:17 2019 -0400 @@ -10,14 +10,16 @@ from mercurial import ( extensions, registrar, - repository, util, wireprotoserver, wireprototypes, wireprotov2server, ) +from mercurial.interfaces import ( + repository, + util as interfaceutil, +) from mercurial.utils import ( - interfaceutil, stringutil, )
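This hunk tracks the relocation of the interface definitions into the new `mercurial.interfaces` package. A third-party extension that needs to work on both sides of the move could guard the import; a possible (untested) compatibility shim:

    # Possible compatibility shim for out-of-tree extensions spanning
    # the mercurial.interfaces reorganization (untested sketch).
    try:
        # newer layout, as used by this changeset
        from mercurial.interfaces import (
            repository,
            util as interfaceutil,
        )
    except ImportError:
        # older layout
        from mercurial import repository
        from mercurial.utils import interfaceutil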